ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
ddfcea69-3e8f-447d-969f-24dcf59ba273 | cpp | tensorflow/tensorflow | input_split_metadata | tensorflow/core/kernels/batching_util/input_split_metadata.cc | tensorflow/core/kernels/batching_util/input_split_metadata_test.cc | #include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include <algorithm>
#include "absl/container/fixed_array.h"
#include "absl/strings/str_join.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
int compute_task_size_from_open_batch(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
return (open_batch_remaining_slot > 0)
? (input_task_size + batch_size_limit - open_batch_remaining_slot)
: input_task_size;
}
int compute_head_task_size(int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) {
if (open_batch_remaining_slot == 0) {
return std::min(input_task_size, batch_size_limit);
}
return std::min(open_batch_remaining_slot, input_task_size);
}
int compute_tail_task_size(int task_size_from_open_batch, int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
int tail_task_size;
if (input_task_size <= open_batch_remaining_slot) {
tail_task_size = input_task_size;
} else {
tail_task_size = task_size_from_open_batch % batch_size_limit;
if (tail_task_size == 0) {
tail_task_size = batch_size_limit;
}
}
return tail_task_size;
}
int compute_num_batches(int task_size_from_open_batch, int batch_size_limit) {
return (task_size_from_open_batch + batch_size_limit - 1) / batch_size_limit;
}
}
InputSplitMetadata::InputSplitMetadata(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit)
: task_sizes_(generate_task_sizes(
input_task_size, open_batch_remaining_slot, batch_size_limit)) {}
const absl::FixedArray<int>& InputSplitMetadata::task_sizes() const {
return task_sizes_;
}
std::string InputSplitMetadata::DebugString() const {
return absl::StrJoin(task_sizes_, ", ");
}
absl::FixedArray<int> InputSplitMetadata::generate_task_sizes(
int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) const {
const int task_size_from_open_batch = compute_task_size_from_open_batch(
input_task_size, open_batch_remaining_slot, batch_size_limit);
const int num_batches =
compute_num_batches(task_size_from_open_batch, batch_size_limit);
absl::FixedArray<int> task_sizes(num_batches, batch_size_limit);
task_sizes.front() = compute_head_task_size(
input_task_size, open_batch_remaining_slot, batch_size_limit);
task_sizes.back() =
compute_tail_task_size(task_size_from_open_batch, input_task_size,
open_batch_remaining_slot, batch_size_limit);
return task_sizes;
}
}
}
} | #include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
TEST(InputSplitUtilTest, Basic) {
for (const auto& batch_task_param :
{std::tuple<int, int, int, int, int, int, int>{5, 1, 1, 5, 4, 1, 1},
{10, 3, 4, 3, 2, 3, 3},
{20, 5, 6, 4, 3, 5, 3},
{30, 0, 11, 3, 3, 11, 8},
{5, 6, 8, 1, 0, 5, 5}}) {
const int input_size = std::get<0>(batch_task_param);
const int open_batch_remaining_slot = std::get<1>(batch_task_param);
const int batch_size_limit = std::get<2>(batch_task_param);
const int expected_num_batches = std::get<3>(batch_task_param);
const int expected_head_batch_task_size = std::get<5>(batch_task_param);
const int expected_tail_batch_task_size = std::get<6>(batch_task_param);
ASSERT_LE(open_batch_remaining_slot, batch_size_limit);
InputSplitMetadata input_split_metadata(
input_size, open_batch_remaining_slot, batch_size_limit);
EXPECT_EQ(input_split_metadata.task_sizes().size(), expected_num_batches);
absl::FixedArray<int> expected_task_sizes(expected_num_batches);
for (int i = 0; i < expected_num_batches; i++) {
if (i == 0) {
expected_task_sizes[i] = expected_head_batch_task_size;
} else if (i == expected_num_batches - 1) {
expected_task_sizes[i] = expected_tail_batch_task_size;
} else {
expected_task_sizes[i] = batch_size_limit;
}
}
EXPECT_THAT(input_split_metadata.task_sizes(),
::testing::ElementsAreArray(expected_task_sizes));
EXPECT_EQ(input_split_metadata.DebugString(),
absl::StrJoin(expected_task_sizes, ", "));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/input_split_metadata.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/input_split_metadata_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf3e0057-6b6b-4aa4-8a31-26a70b5a6de1 | cpp | tensorflow/tensorflow | batch_scheduler | tensorflow/core/kernels/batching_util/batch_scheduler.cc | tensorflow/core/kernels/batching_util/batch_scheduler_test.cc | #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace serving {
absl::StatusOr<MixedPriorityBatchingPolicy> GetMixedPriorityBatchingPolicy(
absl::string_view attr_value) {
if (attr_value == kLowPriorityPaddingWithMaxBatchSizeAttrValue) {
return MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
} else if (attr_value ==
kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue) {
return MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
} else if (attr_value == kPriorityIsolationAttrValue) {
return MixedPriorityBatchingPolicy::kPriorityIsolation;
}
return absl::InvalidArgumentError(absl::StrFormat(
"Unknown mixed priority batching policy: %s", attr_value));
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Pointee;
using ::testing::Pointer;
using ::testing::Property;
TEST(MixedPriorityBatchingPolicyTest, InvalidAttrValueError) {
EXPECT_THAT(
GetMixedPriorityBatchingPolicy("invalid_attr_value"),
testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"Unknown mixed priority batching policy: invalid_attr_value")));
}
using MixedPriorityBatchingPolicyParameterizedTest = ::testing::TestWithParam<
std::tuple<std::string, MixedPriorityBatchingPolicy>>;
TEST_P(MixedPriorityBatchingPolicyParameterizedTest,
GetMixedPriorityBatchingPolicySuccess) {
auto [attr_name, policy] = GetParam();
EXPECT_THAT(GetMixedPriorityBatchingPolicy(attr_name),
testing::IsOkAndHolds(Eq(policy)));
}
INSTANTIATE_TEST_SUITE_P(
Parameter, MixedPriorityBatchingPolicyParameterizedTest,
::testing::Values(
std::make_tuple(
kLowPriorityPaddingWithMaxBatchSizeAttrValue,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithMaxBatchSize),
std::make_tuple(
kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize),
std::make_tuple(
kPriorityIsolationAttrValue,
MixedPriorityBatchingPolicy::kPriorityIsolation)));
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
TEST(TaskCriticalityTest, CriticalityDefaultsToCritical) {
FakeTask fake_task(0);
EXPECT_EQ(fake_task.criticality(), tsl::criticality::Criticality::kCritical);
}
TEST(TaskQueueTest, EmptyTaskQueue) {
TaskQueue<FakeTask> task_queue;
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, AddTaskToTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
}
TEST(TaskQueueTest, AddTasksToTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
}
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithSingleTask) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(),
Pointee(Property(&FakeTask::size, Eq(1))));
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithMultipleTasks) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(2), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(2, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(1), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(),
Pointee(Property(&FakeTask::size, Eq(2))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
}
TEST(TaskQueueTest, RemoveTasksFromTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(3),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
}
TEST(TaskQueueTest, RemoveTasksFewerThanArgFromTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(5),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
}
TEST(TaskQueueTest, RemoveAllTasksWhenArgGreaterThanTaskSize) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(8),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2))),
Pointee(Property(&FakeTask::size, Eq(3)))));
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, EarliestStartTimeWithEmptyQueue) {
TaskQueue<FakeTask> task_queue;
EXPECT_FALSE(task_queue.EarliestTaskStartTime().has_value());
}
TEST(TaskQueueTest, EarliestStartTimeWithMultipleTasksInQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 1);
}
TEST(TaskQueueTest, EarliestStartTimeAfterTaskRemoval) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 1);
EXPECT_THAT(task_queue.RemoveTask(3),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 3);
}
TEST(BatchTest, Basic) {
Batch<FakeTask> batch;
EXPECT_EQ(0, batch.num_tasks());
EXPECT_TRUE(batch.empty());
EXPECT_EQ(0, batch.size());
EXPECT_FALSE(batch.IsClosed());
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
EXPECT_EQ(1, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size(), batch.size());
EXPECT_EQ(task0->size(), batch.task(0).size());
EXPECT_FALSE(batch.IsClosed());
auto task1 = new FakeTask(7);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
EXPECT_EQ(2, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size() + task1->size(), batch.size());
EXPECT_EQ(task1->size(), batch.task(1).size());
EXPECT_EQ(task1->size(), batch.mutable_task(1)->size());
EXPECT_FALSE(batch.IsClosed());
batch.Close();
EXPECT_TRUE(batch.IsClosed());
EXPECT_EQ(2, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size() + task1->size(), batch.size());
EXPECT_EQ(task0->size(), batch.task(0).size());
EXPECT_EQ(task1->size(), batch.task(1).size());
EXPECT_EQ(7, batch.RemoveTask()->size());
EXPECT_EQ(3, batch.size());
EXPECT_EQ(3, batch.RemoveTask()->size());
EXPECT_EQ(0, batch.size());
EXPECT_TRUE(batch.empty());
}
TEST(BatchTest, WaitUntilClosed) {
Batch<FakeTask> batch;
batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
EXPECT_FALSE(batch.IsClosed());
std::unique_ptr<Thread> close_thread(
Env::Default()->StartThread(ThreadOptions(), "test", [&batch]() {
Env::Default()->SleepForMicroseconds(100);
batch.Close();
}));
batch.WaitUntilClosed();
EXPECT_TRUE(batch.IsClosed());
}
TEST(BatchTest, DeletionBlocksUntilClosed) {
Batch<FakeTask>* batch = new Batch<FakeTask>;
batch->AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
EXPECT_FALSE(batch->IsClosed());
Notification do_delete, deleted;
std::unique_ptr<Thread> delete_thread(Env::Default()->StartThread(
ThreadOptions(), "test", [&batch, &do_delete, &deleted]() {
do_delete.WaitForNotification();
delete batch;
deleted.Notify();
}));
do_delete.Notify();
Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(deleted.HasBeenNotified());
batch->Close();
deleted.WaitForNotification();
}
TEST(BatchTest, RemoveAllTasks) {
Batch<FakeTask> batch;
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
auto task1 = new FakeTask(7);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
batch.Close();
EXPECT_TRUE(batch.IsClosed());
std::vector<std::unique_ptr<FakeTask>> tasks_in_batch =
batch.RemoveAllTasks();
EXPECT_EQ(2, tasks_in_batch.size());
EXPECT_TRUE(batch.empty());
EXPECT_EQ(task0, tasks_in_batch[0].get());
EXPECT_EQ(task1, tasks_in_batch[1].get());
EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
}
TEST(BatchTest, TryTrimToNewSizeTrimsAndReturnsTrimmedElementsInOrder) {
Batch<FakeTask> batch;
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
auto task1 = new FakeTask(5);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
auto task2 = new FakeTask(7);
batch.AddTask(std::unique_ptr<FakeTask>(task2));
auto task3 = new FakeTask(9);
batch.AddTask(std::unique_ptr<FakeTask>(task3));
std::vector<std::unique_ptr<FakeTask>> trimmed_tasks;
batch.TryTrimToNewSize(8, trimmed_tasks);
EXPECT_EQ(batch.size(), 8);
EXPECT_EQ(batch.num_tasks(), 2);
EXPECT_THAT(trimmed_tasks, ElementsAre(Pointer(task2), Pointer(task3)));
batch.Close();
}
TEST(BatchTest, TryTrimToNewSizeDoesNotTrimWhenItWouldNeedToSplitATask) {
Batch<FakeTask> batch;
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
auto task1 = new FakeTask(5);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
std::vector<std::unique_ptr<FakeTask>> trimmed_tasks;
batch.TryTrimToNewSize(4, trimmed_tasks);
EXPECT_EQ(batch.size(), 8);
EXPECT_EQ(batch.num_tasks(), 2);
EXPECT_TRUE(trimmed_tasks.empty());
batch.Close();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54ac5196-c078-4979-b2bd-62baf3bb4e06 | cpp | tensorflow/tensorflow | fake_clock_env | tensorflow/core/util/fake_clock_env.cc | tensorflow/core/util/fake_clock_env_test.cc | #include "tensorflow/core/util/fake_clock_env.h"
#include <string>
namespace tensorflow {
FakeClockEnv::FakeClockEnv(Env* wrapped) : EnvWrapper(wrapped) {}
void FakeClockEnv::AdvanceByMicroseconds(int64_t micros) {
{
mutex_lock l(mu_);
current_time_ += micros;
}
}
uint64 FakeClockEnv::NowMicros() const {
{
mutex_lock l(mu_);
return current_time_;
}
}
} | #include "tensorflow/core/util/fake_clock_env.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace {
class FakeClockEnvTest : public ::testing::Test {
protected:
void SetUp() override {
fake_clock_env_ = std::make_unique<FakeClockEnv>(Env::Default());
}
void TearDown() override { fake_clock_env_.reset(); }
std::unique_ptr<FakeClockEnv> fake_clock_env_;
};
TEST_F(FakeClockEnvTest, TimeInitializedToZero) {
EXPECT_EQ(0, fake_clock_env_->NowMicros());
}
TEST_F(FakeClockEnvTest, AdvanceTimeByMicroseconds) {
int current_time = fake_clock_env_->NowMicros();
int64_t duration = 100;
current_time += duration;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
for (int i = 0; i < 5; ++i) {
fake_clock_env_->AdvanceByMicroseconds(100);
current_time += 100;
}
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
current_time += duration;
duration = 200;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_NE(current_time, fake_clock_env_->NowMicros());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4f9db86-47d0-4382-9092-c1d5e9321cd4 | cpp | tensorflow/tensorflow | bounded_executor | tensorflow/core/kernels/batching_util/bounded_executor.cc | tensorflow/core/kernels/batching_util/bounded_executor_test.cc | #include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include <algorithm>
#include <atomic>
#include "absl/functional/bind_front.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace serving {
StatusOr<std::unique_ptr<BoundedExecutor>> BoundedExecutor::Create(
const Options& options) {
if (options.env == nullptr) {
return errors::InvalidArgument("options.env must not be nullptr");
}
if (options.num_threads <= 0) {
return errors::InvalidArgument("options.num_threads must be positive");
}
return absl::WrapUnique(new BoundedExecutor(options));
}
BoundedExecutor::BoundedExecutor(const Options& options) : options_(options) {
InitWorker();
}
void BoundedExecutor::InitWorker() {
for (int i = 0; i < options_.num_threads; i++) {
std::unique_ptr<Thread> thread = absl::WrapUnique(
options_.env->StartThread(options_.thread_options, options_.thread_name,
[this]() { this->Run(); }));
threads_.push_back(std::move(thread));
}
}
BoundedExecutor::~BoundedExecutor() {
{
mutex_lock l(work_queue_mu_);
for (int i = 0; i < NumThreads(); i++) {
work_queue_.push_back(nullptr);
work_queue_cv_.notify_one();
}
}
threads_.clear();
}
void BoundedExecutor::Schedule(std::function<void()> func) {
DCHECK(func != nullptr) << "func is nullptr";
mutex_lock l(work_queue_mu_);
work_queue_.push_back(std::move(func));
work_queue_cv_.notify_one();
}
int BoundedExecutor::NumThreads() const { return options_.num_threads; }
int BoundedExecutor::CurrentThreadId() const { return -1; }
void BoundedExecutor::Run() {
while (true) {
std::function<void()> func = nullptr;
{
mutex_lock l(work_queue_mu_);
while (work_queue_.empty()) {
work_queue_cv_.wait(l);
}
func = std::move(work_queue_.front());
work_queue_.pop_front();
}
if (func != nullptr) {
func();
} else {
break;
}
}
}
}
} | #include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include "absl/functional/bind_front.h"
#include "absl/time/time.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace serving {
namespace {
class TaskTracker {
public:
std::function<void()> MakeTask(int task_id, absl::Duration sleep_duration) {
return absl::bind_front(&TaskTracker::Run, this, task_id, sleep_duration);
}
void Run(int task_id, absl::Duration sleep_duration) {
LOG(INFO) << "Entering task " << task_id;
{
mutex_lock l(mutex_);
++task_count_;
++running_count_;
if (running_count_ > max_running_count_) {
max_running_count_ = running_count_;
}
}
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(sleep_duration));
{
mutex_lock l(mutex_);
--running_count_;
}
LOG(INFO) << "Task " << task_id << " exiting.";
}
int task_count() {
mutex_lock l(mutex_);
return task_count_;
}
int running_count() {
mutex_lock l(mutex_);
return running_count_;
}
int max_running_count() {
mutex_lock l(mutex_);
return max_running_count_;
}
private:
mutex mutex_;
int task_count_ = 0;
int running_count_ = 0;
int max_running_count_ = 0;
};
TEST(BoundedExecutorTest, InvalidEmptyEnv) {
BoundedExecutor::Options options;
options.num_threads = 2;
options.env = nullptr;
EXPECT_THAT(BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.env must not be nullptr"));
}
TEST(BoundedExecutorTest, InvalidNumThreads) {
{
BoundedExecutor::Options options;
options.num_threads = 0;
EXPECT_THAT(
BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.num_threads must be positive"));
}
{
BoundedExecutor::Options options;
options.num_threads = -1;
EXPECT_THAT(
BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.num_threads must be positive"));
}
}
TEST(BoundedExecutorTest, AddRunsFunctionsEventually) {
BoundedExecutor::Options options;
options.num_threads = 2;
TF_ASSERT_OK_AND_ASSIGN(auto executor, BoundedExecutor::Create(options));
Notification done0;
executor->Schedule([&done0] { done0.Notify(); });
Notification done1;
executor->Schedule([&done1] { done1.Notify(); });
done0.WaitForNotification();
done1.WaitForNotification();
executor.reset();
}
TEST(BoundedExecutorTest, MaxInflightLimit) {
BoundedExecutor::Options options;
options.num_threads = 5;
TF_ASSERT_OK_AND_ASSIGN(auto executor, BoundedExecutor::Create(options));
const int num_tasks = 100;
TaskTracker task_tracker;
for (int i = 0; i < num_tasks; i++) {
executor->Schedule(task_tracker.MakeTask(i, absl::Seconds(1)));
}
executor.reset();
EXPECT_EQ(task_tracker.task_count(), num_tasks);
EXPECT_EQ(task_tracker.max_running_count(), options.num_threads);
EXPECT_EQ(task_tracker.running_count(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/bounded_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/bounded_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
752a36ae-7bac-440c-b8fb-ae3cbe7201fb | cpp | tensorflow/tensorflow | periodic_function | tensorflow/core/kernels/batching_util/periodic_function.cc | tensorflow/core/kernels/batching_util/periodic_function_test.cc | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <algorithm>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace serving {
PeriodicFunction::PeriodicFunction(absl::AnyInvocable<void()> function,
const int64_t interval_micros,
const Options& options)
: function_(std::move(function)),
interval_micros_([interval_micros]() -> int64 {
if (interval_micros < 0) {
const string error = strings::StrCat(
" The value of 'interval_micros' should be >= 0: ",
interval_micros, ". ");
DCHECK(false) << error;
LOG(WARNING) << error << "Resetting it to 0.";
return 0;
}
return interval_micros;
}()),
options_(options) {
thread_.reset(options_.env->StartThread(
options_.thread_options, options_.thread_name_prefix, [this]() {
RunLoop(options_.env->NowMicros());
}));
}
PeriodicFunction::~PeriodicFunction() {
NotifyStop();
thread_.reset();
}
void PeriodicFunction::NotifyStop() {
if (!stop_thread_.HasBeenNotified()) {
stop_thread_.Notify();
}
}
void PeriodicFunction::RunLoop(const int64_t start) {
{
if (options_.startup_delay_micros > 0) {
const int64_t deadline = start + options_.startup_delay_micros;
options_.env->SleepForMicroseconds(deadline - start);
}
while (!stop_thread_.HasBeenNotified()) {
VLOG(3) << "Running function.";
const int64_t begin = options_.env->NowMicros();
function_();
const int64_t end =
std::max(static_cast<int64_t>(options_.env->NowMicros()), begin);
const int64_t deadline = begin + interval_micros_;
if (deadline > end) {
if (end > begin) {
VLOG(3) << "Reducing interval_micros from " << interval_micros_
<< " to " << (deadline - end);
}
options_.env->SleepForMicroseconds(deadline - end);
} else {
VLOG(3) << "Function took longer than interval_micros, so not sleeping";
}
}
}
}
}
} | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
class PeriodicFunctionTestAccess {
public:
explicit PeriodicFunctionTestAccess(PeriodicFunction* periodic_function)
: periodic_function_(periodic_function) {}
void NotifyStop() { periodic_function_->NotifyStop(); }
private:
PeriodicFunction* const periodic_function_;
};
}
namespace {
using test_util::FakeClockEnv;
void StopPeriodicFunction(PeriodicFunction* periodic_function,
FakeClockEnv* fake_clock_env,
const uint64 pf_interval_micros) {
fake_clock_env->BlockUntilThreadsAsleep(1);
internal::PeriodicFunctionTestAccess(periodic_function).NotifyStop();
fake_clock_env->AdvanceByMicroseconds(pf_interval_micros);
}
TEST(PeriodicFunctionTest, ObeyInterval) {
const int64_t kPeriodMicros = 2;
const int kCalls = 10;
int actual_calls = 0;
{
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
for (int i = 0; i < kCalls; ++i) {
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
ASSERT_EQ(actual_calls, kCalls + 1);
}
TEST(PeriodicFunctionTest, ObeyStartupDelay) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
int actual_calls = 0;
{
PeriodicFunction::Options options;
options.startup_delay_micros = kDelayMicros;
FakeClockEnv fake_clock_env(Env::Default());
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
EXPECT_EQ(0, actual_calls);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
StopPeriodicFunction(&periodic_function, &fake_clock_env, kDelayMicros);
}
EXPECT_EQ(1, actual_calls);
}
TEST(PeriodicFunctionTest, StartupDelayRace) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
mutex mu;
int counter = 0;
std::unique_ptr<Notification> listener(new Notification);
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
options.startup_delay_micros = kDelayMicros;
PeriodicFunction periodic_function(
[&mu, &counter, &listener]() {
mutex_lock l(mu);
counter++;
listener->Notify();
},
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(1, counter);
listener.reset(new Notification);
}
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(2, counter);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
TEST(PeriodicFunctionTest, MinInterval) {
PeriodicFunction periodic_function(
[]() { Env::Default()->SleepForMicroseconds(20 * 1000); }, 0);
}
class PeriodicFunctionWithFakeClockEnvTest : public ::testing::Test {
protected:
const int64_t kPeriodMicros = 50;
PeriodicFunctionWithFakeClockEnvTest()
: fake_clock_env_(Env::Default()),
counter_(0),
pf_(
[this]() {
mutex_lock l(counter_mu_);
++counter_;
},
kPeriodMicros, GetPeriodicFunctionOptions()) {}
PeriodicFunction::Options GetPeriodicFunctionOptions() {
PeriodicFunction::Options options;
options.thread_name_prefix = "ignore";
options.env = &fake_clock_env_;
return options;
}
void SetUp() override {
ASSERT_TRUE(AwaitCount(1));
}
void TearDown() override {
StopPeriodicFunction(&pf_, &fake_clock_env_, kPeriodMicros);
}
bool AwaitCount(int expected_counter) {
fake_clock_env_.BlockUntilThreadsAsleep(1);
{
mutex_lock lock(counter_mu_);
return counter_ == expected_counter;
}
}
FakeClockEnv fake_clock_env_;
mutex counter_mu_;
int counter_;
PeriodicFunction pf_;
};
TEST_F(PeriodicFunctionWithFakeClockEnvTest, FasterThanRealTime) {
fake_clock_env_.AdvanceByMicroseconds(kPeriodMicros / 2);
for (int i = 2; i < 7; ++i) {
fake_clock_env_.AdvanceByMicroseconds(kPeriodMicros);
EXPECT_TRUE(AwaitCount(i));
}
}
TEST_F(PeriodicFunctionWithFakeClockEnvTest, SlowerThanRealTime) {
Env::Default()->SleepForMicroseconds(125 * 1000);
EXPECT_TRUE(AwaitCount(1));
}
TEST(PeriodicFunctionDeathTest, BadInterval) {
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function([]() {}, -1),
".* should be >= 0");
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function(
[]() {}, -1, PeriodicFunction::Options()),
".* should be >= 0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/periodic_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/periodic_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5377b2a-d6ad-4f29-a106-4bb6bead3701 | cpp | tensorflow/tensorflow | batch_scheduler_utils | tensorflow/core/kernels/batching_util/batch_scheduler_utils.cc | tensorflow/core/kernels/batching_util/batch_scheduler_utils_test.cc | #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include <algorithm>
#include <vector>
#include "absl/algorithm/container.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
int GetNextAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding) {
if (disable_padding || allowed_batch_sizes.empty()) {
return batch_size;
}
DCHECK(absl::c_is_sorted(allowed_batch_sizes));
DCHECK_GT(batch_size, 0);
for (int allowed_size : allowed_batch_sizes) {
if (allowed_size >= batch_size) {
return allowed_size;
}
}
LOG(ERROR) << "Batch size " << batch_size
<< " is greater than largest allowed size; ignoring allowed sizes "
"constraint.";
return batch_size;
}
int32 GetPrevAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding) {
if (disable_padding || allowed_batch_sizes.empty()) {
return batch_size;
}
DCHECK(absl::c_is_sorted(allowed_batch_sizes));
DCHECK_GT(batch_size, 0);
auto result = std::find_if(
allowed_batch_sizes.rbegin(), allowed_batch_sizes.rend(),
[&](int allowed_size) { return allowed_size <= batch_size; });
if (result == allowed_batch_sizes.rend()) {
return batch_size;
}
return *result;
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
namespace tensorflow {
namespace serving {
namespace {
TEST(GetNextAllowedBatchSizeTest, PaddingDisallowed) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {2, 4, 8}, true), 3);
}
TEST(GetNextAllowedBatchSizeTest, EmptyAllowedBatchSizes) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {}, false), 3);
}
TEST(GetNextAllowedBatchSizeTest, NextAllowedBatchSizeFound) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {2, 4, 8}, false), 4);
}
TEST(GetNextAllowedBatchSizeTest, AlreadyAllowedBatchSize) {
EXPECT_EQ(GetNextAllowedBatchSize(2, {2, 4, 8}, false), 2);
}
TEST(GetNextAllowedBatchSizeTest, GreaterThanAllowedBatchSize) {
EXPECT_EQ(GetNextAllowedBatchSize(10, {2, 4, 8}, false), 10);
}
TEST(GetPrevAllowedBatchSizeTest, PaddingDisallowed) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {2, 4, 8}, true), 3);
}
TEST(GetPrevAllowedBatchSizeTest, EmptyAllowedBatchSizes) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {}, false), 3);
}
TEST(GetPrevAllowedBatchSizeTest, PrevAllowedBatchSizeFound) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {1, 2, 4, 8}, false), 2);
}
TEST(GetPrevAllowedBatchSizeTest, NoSmallerAllowedBatchSizeFound) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {4, 8}, false), 3);
}
TEST(GetPrevAllowedBatchSizeTest, AlreadyAllowedBatchSize) {
EXPECT_EQ(GetPrevAllowedBatchSize(2, {1, 2, 4, 8}, false), 2);
}
TEST(GetPrevAllowedBatchSizeTest, GreaterThanMaxAllowedBatchSize) {
EXPECT_EQ(GetPrevAllowedBatchSize(10, {2, 4, 8}, false), 8);
}
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
size_t size() const override { return size_; }
private:
const size_t size_;
};
TEST(MaybeBatchDownTest, PadUp) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {1, 2, 4, 8},
false,
kPadUpPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 3);
}
TEST(MaybeBatchDownTest, BatchDown) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {1, 2, 4, 8},
false,
kBatchDownPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 2);
EXPECT_EQ(out_trimmed_tasks.size(), 1);
}
TEST(MaybeBatchDownTest, BatchDownDoesNotSplitTasks) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(2));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {1, 2, 4, 8},
false,
kBatchDownPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 3);
}
TEST(MaybeBatchDownTest, BatchDownDoesNothingWhenTheBatchSizeIsAlreadyAllowed) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {1, 2, 4, 8},
false,
kBatchDownPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 4);
}
TEST(MaybeBatchDownTest, BatchDownDoesNothingWhenNoSmallerAllowedSize) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {4, 8},
false,
kBatchDownPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 3);
}
TEST(MaybeBatchDownTest, MinimizeTpuCostPerRequestPicksBatchDown) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
ModelBatchStats model_batch_stats;
model_batch_stats.batch_size(2).tpu_cost().Register(absl::Seconds(2));
model_batch_stats.batch_size(4).tpu_cost().Register(absl::Seconds(3.1));
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {2, 4},
false,
kMinimizeTpuCostPerRequestPolicy,
&model_batch_stats,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 2);
}
TEST(MaybeBatchDownTest, MinimizeTpuCostPerRequestPicksPadUp) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
ModelBatchStats model_batch_stats;
model_batch_stats.batch_size(2).tpu_cost().Register(absl::Seconds(2));
model_batch_stats.batch_size(4).tpu_cost().Register(absl::Seconds(2.9));
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {2, 4},
false,
kMinimizeTpuCostPerRequestPolicy,
&model_batch_stats,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 3);
}
TEST(MaybeBatchDownTest, MinimizeTpuCostPerRequestIsOkWithMissingCosts) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
ModelBatchStats model_batch_stats;
model_batch_stats.batch_size(2).tpu_cost().Register(absl::Seconds(2));
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {2, 4},
false,
kMinimizeTpuCostPerRequestPolicy,
&model_batch_stats,
out_trimmed_tasks);
}
TEST(MaybeBatchDownTest, MinimizeTpuCostPerRequestDoesPadUpWhenNoModelStats) {
Batch<FakeTask> batch;
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.AddTask(std::make_unique<FakeTask>(1));
batch.Close();
std::vector<std::unique_ptr<FakeTask>> out_trimmed_tasks;
MaybeBatchDown(
batch, {2, 4},
false,
kMinimizeTpuCostPerRequestPolicy,
nullptr,
out_trimmed_tasks);
EXPECT_EQ(batch.size(), 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5637a841-1cf0-4e54-8f2c-c3b86f538850 | cpp | tensorflow/tensorflow | threadsafe_status | tensorflow/core/kernels/batching_util/threadsafe_status.cc | tensorflow/core/kernels/batching_util/threadsafe_status_test.cc | #include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
const Status& ThreadSafeStatus::status() const& {
tf_shared_lock lock(mutex_);
return status_;
}
Status ThreadSafeStatus::status() && {
tf_shared_lock lock(mutex_);
return std::move(status_);
}
void ThreadSafeStatus::Update(const Status& new_status) {
if (new_status.ok()) {
return;
}
mutex_lock lock(mutex_);
status_.Update(new_status);
}
void ThreadSafeStatus::Update(Status&& new_status) {
if (new_status.ok()) {
return;
}
mutex_lock lock(mutex_);
status_.Update(std::forward<Status>(new_status));
}
} | #include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(ThreadSafeStatus, DefaultOk) {
ThreadSafeStatus status;
TF_EXPECT_OK(status.status());
}
TEST(ThreadSafeStatus, Update) {
ThreadSafeStatus status;
TF_EXPECT_OK(status.status());
status.Update(errors::FailedPrecondition("original error"));
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
status.Update(absl::OkStatus());
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
status.Update(errors::Internal("new error"));
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
}
TEST(ThreadSafeStatus, Move) {
ThreadSafeStatus status;
TF_EXPECT_OK(std::move(status).status());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/threadsafe_status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/threadsafe_status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f43339cd-f6ec-453e-ae21-0445215c2c4b | cpp | tensorflow/tensorflow | adjust_contrast_op | tensorflow/core/kernels/image/adjust_contrast_op.cc | tensorflow/core/kernels/image/adjust_contrast_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/adjust_contrast_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class AdjustContrastOp : public OpKernel {
public:
explicit AdjustContrastOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& factor = context->input(1);
const Tensor& min_value = context->input(2);
const Tensor& max_value = context->input(3);
OP_REQUIRES(context, input.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input.shape().DebugString()));
const int64_t height = input.dim_size(input.dims() - 3);
const int64_t width = input.dim_size(input.dims() - 2);
const int64_t channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor.shape()),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_value.shape()),
errors::InvalidArgument("min_value must be scalar: ",
min_value.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_value.shape()),
errors::InvalidArgument("max_value must be scalar: ",
max_value.shape().DebugString()));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of AdjustContrast is not"
" currently available."));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
Tensor mean_values;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::value,
TensorShape(input.shape()),
&mean_values));
if (input.NumElements() > 0) {
const int64_t batch = input.NumElements() / (height * width * channels);
const int64_t shape[4] = {batch, height, width, channels};
functor::AdjustContrast<Device, T>()(
context->eigen_device<Device>(), input.shaped<T, 4>(shape),
factor.scalar<float>(), min_value.scalar<float>(),
max_value.scalar<float>(), mean_values.shaped<float, 4>(shape),
output->shaped<float, 4>(shape));
}
}
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrast").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
AdjustContrastOp<CPUDevice, T>);
REGISTER_KERNEL(uint8);
REGISTER_KERNEL(int8);
REGISTER_KERNEL(int16);
REGISTER_KERNEL(int32);
REGISTER_KERNEL(float);
REGISTER_KERNEL(double);
#undef REGISTER_KERNEL
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void AdjustContrast<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<float>::ConstScalar contrast_factor, \
typename TTypes<float>::ConstScalar min_value, \
typename TTypes<float>::ConstScalar max_value, \
typename TTypes<float, 4>::Tensor mean_values, \
typename TTypes<float, 4>::Tensor output); \
extern template struct AdjustContrast<GPUDevice, T>;
DECLARE_GPU_SPEC(uint8);
DECLARE_GPU_SPEC(int8);
DECLARE_GPU_SPEC(int16);
DECLARE_GPU_SPEC(int32);
DECLARE_GPU_SPEC(float);
DECLARE_GPU_SPEC(double);
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrast").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
AdjustContrastOp<GPUDevice, T>);
REGISTER_GPU_KERNEL(uint8);
REGISTER_GPU_KERNEL(int8);
REGISTER_GPU_KERNEL(int16);
REGISTER_GPU_KERNEL(int32);
REGISTER_GPU_KERNEL(float);
REGISTER_GPU_KERNEL(double);
#undef REGISTER_GPU_KERNEL
#endif
class AdjustContrastOpV2Base : public OpKernel {
protected:
explicit AdjustContrastOpV2Base(OpKernelConstruction* context)
: OpKernel(context) {}
struct ComputeOptions {
const Tensor* input = nullptr;
const Tensor* factor = nullptr;
Tensor* output = nullptr;
int64_t batch = 0;
int64_t height = 0;
int64_t width = 0;
int64_t channels = 0;
};
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& factor = context->input(1);
OP_REQUIRES(context, input.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input.shape().DebugString()));
const int64_t height = input.dim_size(input.dims() - 3);
const int64_t width = input.dim_size(input.dims() - 2);
const int64_t channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor.shape()),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
if (input.NumElements() > 0) {
const int64_t batch = input.NumElements() / (height * width * channels);
ComputeOptions options;
options.input = &input;
options.factor = &factor;
options.output = output;
options.batch = batch;
options.height = height;
options.width = width;
options.channels = channels;
DoCompute(context, options);
}
}
virtual void DoCompute(OpKernelContext* context,
const ComputeOptions& options) = 0;
};
template <typename Device, typename T>
class AdjustContrastOpv2;
template <>
class AdjustContrastOpv2<CPUDevice, float> : public AdjustContrastOpV2Base {
public:
explicit AdjustContrastOpv2(OpKernelConstruction* context)
: AdjustContrastOpV2Base(context) {}
void DoCompute(OpKernelContext* context,
const ComputeOptions& options) override {
const int64_t batch = options.batch;
const int64_t height = options.height;
const int64_t width = options.width;
const int64_t channels = options.channels;
const int64_t image_size = height * width;
const Tensor* input = options.input;
const Tensor* factor = options.factor;
Tensor* output = options.output;
Tensor mean_values;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<float>::value,
TensorShape({batch, channels}), &mean_values));
auto input_data = input->shaped<float, 3>({batch, image_size, channels});
auto mean_data = mean_values.tensor<float, 2>();
auto output_data = output->shaped<float, 3>({batch, image_size, channels});
ReduceMeanAcrossImage(input_data, mean_data, output_data);
BroadcastAcrossImage(mean_data, output_data);
IncrementWithScaling(input_data, factor->scalar<float>(), output_data);
}
private:
void ReduceMeanAcrossImage(typename TTypes<float, 3>::ConstTensor input,
typename TTypes<float, 2>::Tensor mean,
typename TTypes<float, 3>::Tensor scratch) {
const int64_t batch = input.dimension(0);
const int64_t image_size = input.dimension(1);
const int64_t channels = input.dimension(2);
TTypes<float, 1>::ConstTensor input_flat(&input(0, 0, 0), input.size());
TTypes<float, 1>::Tensor mean_flat(&mean(0, 0), mean.size());
TTypes<float, 1>::Tensor summation_scratch(&scratch(0, 0, 0),
scratch.size());
using Eigen::DenseIndex;
typedef Eigen::array<Eigen::DenseIndex, 1> Index;
const int64_t plane_size = image_size * channels;
for (int64_t i = 0; i < batch; i++) {
auto input_plane = input_flat.slice(Index{DenseIndex(i * plane_size)},
Index{DenseIndex(plane_size)});
auto summation_plane = summation_scratch.slice(
Index{DenseIndex(i * plane_size)}, Index{DenseIndex(plane_size)});
int64_t remaining_size = image_size;
int round = 0;
do {
int64_t right_size = remaining_size / 2;
int64_t left_size = remaining_size - right_size;
DCHECK(left_size == right_size || left_size == right_size + 1);
if (round == 0) {
summation_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)}) =
input_plane.slice(Index{DenseIndex(left_size * channels)},
Index{DenseIndex(right_size * channels)}) +
input_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)});
if (left_size > right_size) {
DCHECK_EQ(left_size - right_size, 1);
summation_plane.slice(Index{DenseIndex(right_size * channels)},
Index{DenseIndex(channels)}) =
input_plane.slice(Index{DenseIndex(right_size * channels)},
Index{DenseIndex(channels)});
}
} else {
summation_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)}) +=
summation_plane.slice(Index{DenseIndex(left_size * channels)},
Index{DenseIndex(right_size * channels)});
}
remaining_size = left_size;
round++;
} while (remaining_size > 1);
const float mean_scaling = 1.0f / image_size;
auto mean_plane = mean_flat.slice(Index{DenseIndex(i * channels)},
Index{DenseIndex(channels)});
mean_plane =
summation_plane.slice(Index{0}, Index{DenseIndex(channels)}) *
mean_scaling;
}
}
void BroadcastAcrossImage(typename TTypes<float, 2>::Tensor inputs,
typename TTypes<float, 3>::Tensor outputs) {
int64_t batch = outputs.dimension(0);
int64_t image_size = outputs.dimension(1);
int64_t channels = outputs.dimension(2);
for (int64_t i = 0; i < batch; i++) {
const float* mean_p = &inputs(i, 0);
float* output_p = &outputs(i, 0, 0);
memcpy(output_p, mean_p, sizeof(float) * channels);
int64_t copied = 1;
while (copied < image_size) {
const int64_t kMaxToCopy = 1024;
int64_t to_copy = std::min({copied, image_size - copied, kMaxToCopy});
memcpy(output_p + channels * copied, output_p,
to_copy * channels * sizeof(float));
copied += to_copy;
}
}
}
void IncrementWithScaling(typename TTypes<float, 3>::ConstTensor input,
typename TTypes<float>::ConstScalar factor,
typename TTypes<float, 3>::Tensor output) {
const float factor_value = factor();
float* p = output.data();
const float* q = input.data();
for (int64_t n = 0; n < input.size(); ++n) {
p[n] += factor_value * (q[n] - p[n]);
}
}
};
REGISTER_KERNEL_BUILDER(
Name("AdjustContrastv2").Device(DEVICE_CPU).TypeConstraint<float>("T"),
AdjustContrastOpv2<CPUDevice, float>);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void AdjustContrastv2<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<float>::ConstScalar contrast_factor, \
typename TTypes<T, 4>::Tensor output); \
extern template struct AdjustContrastv2<GPUDevice, T>;
DECLARE_GPU_SPEC(float);
DECLARE_GPU_SPEC(Eigen::half);
#undef DECLARE_GPU_SPEC
}
template <typename T>
class AdjustContrastOpv2<GPUDevice, T> : public AdjustContrastOpV2Base {
public:
explicit AdjustContrastOpv2(OpKernelConstruction* context)
: AdjustContrastOpV2Base(context) {}
void DoCompute(OpKernelContext* context,
const ComputeOptions& options) override {
const int64_t shape[4] = {options.batch, options.height, options.width,
options.channels};
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of AdjustContrastv2 is not"
" currently available."));
functor::AdjustContrastv2<GPUDevice, T>()(
context->eigen_device<GPUDevice>(), options.input->shaped<T, 4>(shape),
options.factor->scalar<float>(), options.output->shaped<T, 4>(shape));
}
};
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrastv2").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
AdjustContrastOpv2<GPUDevice, T>);
REGISTER_GPU(float)
REGISTER_GPU(Eigen::half)
#undef REGISTER_GPU
#endif
} | #include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class AdjustContrastOpTest : public OpsTestBase {};
TEST_F(AdjustContrastOpTest, Simple_1113) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 1, 3}), {-1, 2, 3});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 3}));
test::FillValues<float>(&expected, {-1, 2, 3});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(AdjustContrastOpTest, Simple_1223) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2, 2, 3}),
{1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12});
AddInputFromArray<float>(TensorShape({}), {0.2f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 3}));
test::FillValues<float>(&expected, {2.2, 6.2, 10.2, 2.4, 6.4, 10.4, 2.6, 6.6,
10.6, 2.8, 6.8, 10.8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(AdjustContrastOpTest, Big_99x99x3) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
std::vector<float> values;
values.reserve(99 * 99 * 3);
for (int i = 0; i < 99 * 99 * 3; ++i) {
values.push_back(i % 255);
}
AddInputFromArray<float>(TensorShape({1, 99, 99, 3}), values);
AddInputFromArray<float>(TensorShape({}), {0.2f});
TF_ASSERT_OK(RunOpKernel());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/adjust_contrast_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/adjust_contrast_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
220965a0-a8d8-4037-8c3b-77ddd8fe7f52 | cpp | tensorflow/tensorflow | image_ops | tensorflow/compiler/tf2xla/kernels/image_ops.cc | tensorflow/core/ops/image_ops_test.cc | #include <array>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/kernels/gather_op_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/sorting.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
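// Converts RGB planes to HSV planes, elementwise. The hue is normalized to
// [0, 1); saturation is range / value (zero when value is zero).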
std::array<xla::XlaOp, 3> RGBToHSV(XlaOpKernelContext* ctx, xla::XlaBuilder* b,
const std::array<xla::XlaOp, 3>& rgb,
DataType dtype, const TensorShape& shape) {
auto zero = XlaHelpers::Zero(b, dtype);
auto one = XlaHelpers::One(b, dtype);
auto red = rgb[0];
auto green = rgb[1];
auto blue = rgb[2];
auto value = xla::Max(xla::Max(red, green), blue);
auto minimum = xla::Min(xla::Min(red, green), blue);
auto range = xla::Sub(value, minimum);
auto zeros = xla::Broadcast(zero, shape.dim_sizes());
auto saturation =
xla::Select(xla::Gt(value, zero), xla::Div(range, value), zeros);
auto norm = xla::Div(XlaHelpers::FloatLiteral(b, dtype, 1.0 / 6.0), range);
auto hue =
xla::Select(xla::Eq(green, value),
xla::Add(xla::Mul(norm, xla::Sub(blue, red)),
XlaHelpers::FloatLiteral(b, dtype, 2.0 / 6.0)),
xla::Add(xla::Mul(norm, xla::Sub(red, green)),
XlaHelpers::FloatLiteral(b, dtype, 4.0 / 6.0)));
hue = xla::Select(xla::Eq(red, value), xla::Mul(norm, xla::Sub(green, blue)),
hue);
hue = xla::Select(xla::Gt(range, zero), hue, zeros);
hue = xla::Select(xla::Lt(hue, zero), xla::Add(hue, one), hue);
return {hue, saturation, value};
}
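// Inverse of RGBToHSV: reconstructs RGB planes from HSV planes, elementwise.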
std::array<xla::XlaOp, 3> HSVToRGB(xla::XlaBuilder* b,
const std::array<xla::XlaOp, 3>& hsv,
DataType dtype) {
xla::XlaOp hue = hsv[0];
xla::XlaOp saturation = hsv[1];
xla::XlaOp value = hsv[2];
auto zero = XlaHelpers::Zero(b, dtype);
auto one = XlaHelpers::FloatLiteral(b, dtype, 1.0);
auto two = XlaHelpers::FloatLiteral(b, dtype, 2.0);
auto three = XlaHelpers::FloatLiteral(b, dtype, 3.0);
auto four = XlaHelpers::FloatLiteral(b, dtype, 4.0);
auto six = XlaHelpers::FloatLiteral(b, dtype, 6.0);
auto dh = xla::Mul(hue, six);
auto dr = xla::Clamp(zero, xla::Sub(xla::Abs(xla::Sub(dh, three)), one), one);
auto dg = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, two))), one);
auto db = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, four))), one);
auto one_minus_s = xla::Sub(one, saturation);
auto red = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dr)), value);
auto green = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dg)), value);
auto blue = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, db)), value);
return {red, green, blue};
}
class RGBToHSVOp : public XlaOpKernel {
public:
explicit RGBToHSVOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape input_shape = context->InputShape(0);
OP_REQUIRES(context, input_shape.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input_shape.DebugString()));
int channel_dim = input_shape.dims() - 1;
int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::FailedPrecondition("input must have 3 channels but input has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
    xla::XlaOp red = xla::SliceInDim(input, 0, 1, 1, channel_dim);
    xla::XlaOp green = xla::SliceInDim(input, 1, 2, 1, channel_dim);
    xla::XlaOp blue = xla::SliceInDim(input, 2, 3, 1, channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv = RGBToHSV(context, b, {red, green, blue}, context->input_type(0),
channel_shape);
context->SetOutput(0, xla::ConcatInDim(b, hsv, channel_dim));
}
};
REGISTER_XLA_OP(Name("RGBToHSV"), RGBToHSVOp);
class HSVToRGBOp : public XlaOpKernel {
public:
explicit HSVToRGBOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape input_shape = context->InputShape(0);
OP_REQUIRES(context, input_shape.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input_shape.DebugString()));
int channel_dim = input_shape.dims() - 1;
int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::FailedPrecondition("input must have 3 channels but input has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
    xla::XlaOp hue = xla::SliceInDim(input, 0, 1, 1, channel_dim);
    xla::XlaOp saturation = xla::SliceInDim(input, 1, 2, 1, channel_dim);
    xla::XlaOp value = xla::SliceInDim(input, 2, 3, 1, channel_dim);
auto rgb = HSVToRGB(context->builder(), {hue, saturation, value},
context->input_type(0));
context->SetOutput(0, xla::ConcatInDim(b, rgb, channel_dim));
}
};
REGISTER_XLA_OP(Name("HSVToRGB"), HSVToRGBOp);
class AdjustContrastOpV2 : public XlaOpKernel {
public:
explicit AdjustContrastOpV2(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& factor_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
int height_dim = input_shape.dims() - 3;
int width_dim = input_shape.dims() - 2;
int channel_dim = input_shape.dims() - 1;
const int64_t height = input_shape.dim_size(height_dim);
const int64_t width = input_shape.dim_size(width_dim);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor_shape),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor_shape.DebugString()));
xla::XlaBuilder* b = context->builder();
DataType type = context->input_type(0);
xla::XlaOp input = context->Input(0);
xla::XlaOp factor = XlaHelpers::ConvertElementType(context->Input(1), type);
const DataType accumulation_type = XlaHelpers::SumAccumulationType(type);
auto converted = XlaHelpers::ConvertElementType(input, accumulation_type);
auto reduce = xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
*context->GetOrCreateAdd(accumulation_type),
{height_dim, width_dim});
auto output = xla::Div(
reduce, XlaHelpers::FloatLiteral(b, accumulation_type, height * width));
output = XlaHelpers::ConvertElementType(output, type);
std::vector<int64_t> broadcast_dims(input_shape.dims() - 2);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims.back() = channel_dim;
output =
xla::Add(xla::Mul(input, factor),
xla::Mul(output, xla::Sub(XlaHelpers::One(b, type), factor)),
broadcast_dims);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustContrastv2"), AdjustContrastOpV2);
class AdjustSaturationOp : public XlaOpKernel {
public:
explicit AdjustSaturationOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& scale_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(scale_shape),
errors::InvalidArgument("scale must be scalar: ",
scale_shape.DebugString()));
const int channel_dim = input_shape.dims() - 1;
const int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::InvalidArgument("input must have 3 channels but instead has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input =
XlaHelpers::ConvertElementType(context->Input(0), DT_FLOAT);
xla::XlaOp scale =
XlaHelpers::ConvertElementType(context->Input(1), DT_FLOAT);
DataType type = context->input_type(0);
    xla::XlaOp red = xla::SliceInDim(input, 0, 1, 1, channel_dim);
    xla::XlaOp green = xla::SliceInDim(input, 1, 2, 1, channel_dim);
    xla::XlaOp blue = xla::SliceInDim(input, 2, 3, 1, channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv =
RGBToHSV(context, b, {red, green, blue}, DT_FLOAT, channel_shape);
hsv[1] = xla::Clamp(XlaHelpers::Zero(b, DT_FLOAT), xla::Mul(hsv[1], scale),
XlaHelpers::One(b, DT_FLOAT));
auto rgb = HSVToRGB(context->builder(), hsv, DT_FLOAT);
auto output = XlaHelpers::ConvertElementType(
xla::ConcatInDim(b, rgb, channel_dim), type);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustSaturation"), AdjustSaturationOp);
class AdjustHueOp : public XlaOpKernel {
public:
explicit AdjustHueOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& delta_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(delta_shape),
errors::InvalidArgument("delta must be scalar: ",
delta_shape.DebugString()));
const int channel_dim = input_shape.dims() - 1;
const int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::InvalidArgument("input must have 3 channels but instead has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input =
XlaHelpers::ConvertElementType(context->Input(0), DT_FLOAT);
xla::XlaOp delta =
XlaHelpers::ConvertElementType(context->Input(1), DT_FLOAT);
DataType type = context->input_type(0);
    xla::XlaOp red = xla::SliceInDim(input, 0, 1, 1, channel_dim);
    xla::XlaOp green = xla::SliceInDim(input, 1, 2, 1, channel_dim);
    xla::XlaOp blue = xla::SliceInDim(input, 2, 3, 1, channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv =
RGBToHSV(context, b, {red, green, blue}, DT_FLOAT, channel_shape);
auto zero = XlaHelpers::Zero(b, DT_FLOAT);
auto one = XlaHelpers::One(b, DT_FLOAT);
auto& hue = hsv[0];
hue = xla::Rem(xla::Add(hsv[0], delta), one);
hue =
xla::Select(xla::Lt(hue, zero), xla::Rem(xla::Add(one, hue), one), hue);
auto rgb = HSVToRGB(context->builder(), hsv, DT_FLOAT);
auto output = XlaHelpers::ConvertElementType(
xla::ConcatInDim(b, rgb, channel_dim), type);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustHue"), AdjustHueOp);
struct WhileCondFn {
const int64_t num_boxes;
const int64_t output_size;
explicit WhileCondFn(int64_t num_boxes, int64_t output_size)
: num_boxes(num_boxes), output_size(output_size) {}
absl::StatusOr<xla::XlaOp> operator()(absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* cond_builder) const {
xla::XlaOp row_idx = values[0];
xla::XlaOp row_in_bounds =
xla::Lt(row_idx, xla::ConstantR0<int32>(cond_builder, num_boxes));
xla::XlaOp num_outputs_so_far = values[1];
xla::XlaOp results_not_full = xla::Lt(
num_outputs_so_far, xla::ConstantR0<int32>(cond_builder, output_size));
return xla::And(row_in_bounds, results_not_full);
}
};
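// Loop body for non-max suppression: if the current box is still active, count
// it as selected and deactivate every remaining box whose IOU with it exceeds
// the threshold (encoded in iou_mask).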
struct SuppressBodyFn {
const int64_t num_boxes;
explicit SuppressBodyFn(int64_t num_boxes) : num_boxes(num_boxes) {}
absl::StatusOr<std::vector<xla::XlaOp>> operator()(
absl::Span<const xla::XlaOp> values, xla::XlaBuilder* builder) const {
auto row_idx = values[0];
auto num_outputs_so_far = values[1];
auto iou_mask = values[2];
auto included_iou = values[3];
auto zero = xla::ConstantR0<int32>(builder, 0);
std::vector<xla::XlaOp> row_idx_vector = {row_idx};
auto active_elem = xla::DynamicSlice(included_iou, row_idx_vector, {1});
active_elem = xla::Reshape(active_elem, {});
num_outputs_so_far = xla::Select(
active_elem, num_outputs_so_far + xla::ConstantR0<int32>(builder, 1),
num_outputs_so_far);
auto row_iou = xla::DynamicSlice(iou_mask, {row_idx, zero}, {1, num_boxes});
TF_ASSIGN_OR_RETURN(auto iou_shape, builder->GetShape(iou_mask));
auto boxes_runtime_size = xla::GetDimensionSize(row_iou, 1);
if (iou_shape.is_dynamic_dimension(1)) {
row_iou = xla::SetDimensionSize(row_iou, boxes_runtime_size, 1);
}
row_iou = xla::DynamicUpdateSlice(
row_iou, xla::ConstantR2FromArray2D<bool>(builder, {{false}}),
{zero, row_idx});
row_iou = xla::Reshape(row_iou, {num_boxes});
auto supp_mask = xla::Not(row_iou);
auto cond = xla::Broadcast(active_elem, {num_boxes});
if (iou_shape.is_dynamic_dimension(1)) {
cond = xla::SetDimensionSize(cond, boxes_runtime_size, 0);
}
included_iou =
xla::Select(cond, xla::And(included_iou, supp_mask), included_iou);
row_idx = row_idx + xla::ConstantR0<int32>(builder, 1);
return std::vector<xla::XlaOp>{row_idx, num_outputs_so_far, iou_mask,
included_iou};
}
};
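// XLA kernel for non-max suppression: boxes are sorted by score, pairwise IOUs
// are materialized as a dense num_boxes x num_boxes mask, and a while loop
// greedily selects boxes; survivors are filtered by the score threshold and
// gathered back to their original indices.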
class NonMaxSuppressionOp : public XlaOpKernel {
public:
explicit NonMaxSuppressionOp(OpKernelConstruction* context)
: XlaOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compile(XlaOpKernelContext* context) override {
OP_REQUIRES(context, pad_to_max_output_size_,
errors::Unimplemented(
"XLA compilation requires pad_to_max_output_size == True"));
xla::XlaOp selected_indices, num_valid;
ComputeResult(context, pad_to_max_output_size_);
}
static void ComputeResult(XlaOpKernelContext* context,
bool pad_to_max_output_size = false) {
const TensorShape& boxes_shape = context->InputShape("boxes");
OP_REQUIRES(
context, TensorShapeUtils::IsMatrix(boxes_shape),
errors::InvalidArgument("boxes must be 2-D, currently: [",
std::to_string(boxes_shape.dim_size(0)), ",",
std::to_string(boxes_shape.dim_size(1)), "]"));
const int64_t num_boxes = boxes_shape.dim_size(0);
OP_REQUIRES(
context, boxes_shape.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns, currently: ",
std::to_string(boxes_shape.dim_size(1))));
const TensorShape& scores_shape = context->InputShape("scores");
OP_REQUIRES(context, TensorShapeUtils::IsVector(scores_shape),
errors::InvalidArgument("scores must be 1-D, currently: ",
scores_shape.DebugString()));
OP_REQUIRES(context, scores_shape.dim_size(0) == num_boxes,
errors::InvalidArgument(
"scores size ", std::to_string(scores_shape.dim_size(0)),
" must equal number of boxes ", std::to_string(num_boxes)));
OP_REQUIRES(context, num_boxes <= kint32max,
errors::InvalidArgument("XLA compilation requires number of "
"boxes to be <= kint32max, got ",
num_boxes));
xla::PrimitiveType boxes_xla_type = context->InputXlaType("boxes");
xla::PrimitiveType scores_xla_type = context->InputXlaType("scores");
const xla::XlaOp boxes_input = context->Input("boxes");
const xla::XlaOp scores_input = context->Input("scores");
int64_t output_size;
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(context->InputShape("max_output_size")),
errors::InvalidArgument("Max Output Size isn't a scalar"));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(context->InputShape("iou_threshold")),
errors::InvalidArgument("IOU Threshold isn't a scalar"));
OP_REQUIRES_OK(context, context->ConstantInputAsIntScalar(2, &output_size));
OP_REQUIRES(
context, output_size >= 0,
errors::InvalidArgument("Need output_size >= 0, got ", output_size));
OP_REQUIRES(context, output_size <= kint32max,
errors::InvalidArgument("Need output_size <= kint32Max, got ",
output_size));
const xla::XlaOp score_thresh = context->Input("score_threshold");
const xla::XlaOp iou_thresh = context->Input("iou_threshold");
xla::XlaBuilder* const builder = context->builder();
const xla::XlaOp boxes = xla::Transpose(boxes_input, {1, 0});
const xla::XlaOp boxes_sorted = xla::GetTupleElement(
xla::Sort({xla::Broadcast(scores_input, {4}), boxes},
xla::CreateScalarGtComputation(
{scores_xla_type, boxes_xla_type}, builder),
1),
1);
const xla::XlaOp iota_indices = xla::Iota(builder, xla::S32, num_boxes);
const xla::XlaOp indices_sort = xla::Sort(
{scores_input, iota_indices},
xla::CreateScalarGtComputation({scores_xla_type, xla::S32}, builder));
const xla::XlaOp indices_sorted = xla::GetTupleElement(indices_sort, 1);
const xla::XlaOp scores = xla::GetTupleElement(indices_sort, 0);
    const xla::XlaOp c_y0 = xla::Reshape(
        xla::SliceInDim(boxes_sorted, 0, 1, 1, 0), {num_boxes});
    const xla::XlaOp c_x0 = xla::Reshape(
        xla::SliceInDim(boxes_sorted, 1, 2, 1, 0), {num_boxes});
    const xla::XlaOp c_y1 = xla::Reshape(
        xla::SliceInDim(boxes_sorted, 2, 3, 1, 0), {num_boxes});
    const xla::XlaOp c_x1 = xla::Reshape(
        xla::SliceInDim(boxes_sorted, 3, 4, 1, 0), {num_boxes});
xla::XlaOp y1 = xla::Select(xla::Le(c_y0, c_y1), c_y0, c_y1);
xla::XlaOp y2 = xla::Select(xla::Le(c_y0, c_y1), c_y1, c_y0);
xla::XlaOp x1 = xla::Select(xla::Le(c_x0, c_x1), c_x0, c_x1);
xla::XlaOp x2 = xla::Select(xla::Le(c_x0, c_x1), c_x1, c_x0);
xla::XlaOp area = (y2 - y1) * (x2 - x1);
y1 = xla::Broadcast(y1, {1});
y2 = xla::Broadcast(y2, {1});
x1 = xla::Broadcast(x1, {1});
x2 = xla::Broadcast(x2, {1});
area = xla::Broadcast(area, {1});
xla::XlaOp i_xmin = xla::Max(x1, xla::Transpose(x1, {1, 0}));
xla::XlaOp i_ymin = xla::Max(y1, xla::Transpose(y1, {1, 0}));
xla::XlaOp i_xmax = xla::Min(x2, xla::Transpose(x2, {1, 0}));
xla::XlaOp i_ymax = xla::Min(y2, xla::Transpose(y2, {1, 0}));
auto square_zero = xla::ZerosLike(i_xmin);
xla::XlaOp i_area = xla::Max(i_xmax - i_xmin, square_zero) *
xla::Max(i_ymax - i_ymin, square_zero);
xla::XlaOp u_area = area + xla::Transpose(area, {1, 0}) - i_area;
xla::XlaOp iou = i_area / u_area;
xla::XlaOp iou_thresh_mask = xla::Gt(iou, iou_thresh + square_zero);
xla::XlaOp included_iou =
xla::Broadcast(xla::ConstantR0<bool>(builder, true), {num_boxes});
auto iou_shape_or = builder->GetShape(iou_thresh_mask);
OP_REQUIRES_OK(context, iou_shape_or.status());
auto boxes_runtime_size = xla::GetDimensionSize(iou_thresh_mask, 1);
if (iou_shape_or.value().is_dynamic_dimension(1)) {
included_iou = xla::SetDimensionSize(included_iou, boxes_runtime_size, 0);
}
std::vector<xla::XlaOp> init_values;
init_values.reserve(4);
init_values.push_back(xla::ConstantR0<int32>(builder, 0));
init_values.push_back(xla::ConstantR0<int32>(builder, 0));
init_values.push_back(iou_thresh_mask);
init_values.push_back(included_iou);
auto suppress_loop_result =
xla::WhileLoopHelper(WhileCondFn(num_boxes, output_size),
SuppressBodyFn(num_boxes), init_values,
"suppress_loop", builder)
.value();
xla::XlaOp included_score =
xla::Gt(scores, xla::Broadcast(score_thresh, {num_boxes}));
xla::XlaOp included = xla::And(included_score, suppress_loop_result[3]);
auto valid_elem = xla::Lt(
iota_indices, xla::Broadcast(suppress_loop_result[0], {num_boxes}));
included = xla::And(included, valid_elem);
xla::XlaOp neg_inf =
xla::Broadcast(xla::MinValue(builder, boxes_xla_type), {num_boxes});
xla::XlaOp scores_included = xla::Select(included, scores, neg_inf);
xla::XlaOp output_tuple = TopK(scores_included, output_size);
xla::XlaOp selected_indices_sorted = xla::GetTupleElement(output_tuple, 1);
xla::XlaOp ones_included = xla::Select(
included,
xla::Broadcast(xla::ConstantR0<int32>(builder, 1), {num_boxes}),
xla::Broadcast(xla::ConstantR0<int32>(builder, 0), {num_boxes}));
    xla::XlaOp num_valid_total = xla::Reduce(
        ones_included, xla::ConstantR0<int>(builder, 0),
        CreateScalarAddComputation(xla::S32, builder), {0});
xla::XlaOp num_valid =
xla::Min(num_valid_total, xla::ConstantR0<int32>(builder, output_size));
xla::XlaOp selected_indices;
DataType gather_type = context->expected_output_dtype(0);
    OP_REQUIRES_OK(
        context,
        XlaGather(indices_sorted, scores_shape, selected_indices_sorted,
                  TensorShape({output_size}), 0, false, gather_type, DT_INT32,
                  builder, &selected_indices));
if (!pad_to_max_output_size) {
absl::StatusOr<xla::XlaOp> rebounded_result =
xla::SetDimensionSizeWithRebound(&context->value_inference(),
selected_indices, num_valid, 0);
if (rebounded_result.ok()) {
selected_indices = *rebounded_result;
} else {
selected_indices =
xla::SetDimensionSize(selected_indices, num_valid, 0);
}
}
context->SetOutput(0, selected_indices);
if (pad_to_max_output_size) context->SetOutput(1, num_valid);
}
private:
bool pad_to_max_output_size_;
};
REGISTER_XLA_OP(
Name("NonMaxSuppressionV4").CompileTimeConstantInput("max_output_size"),
NonMaxSuppressionOp);
class NonMaxSuppressionV3Op : public XlaOpKernel {
public:
explicit NonMaxSuppressionV3Op(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
xla::XlaOp selected_indices, num_valid;
NonMaxSuppressionOp::ComputeResult(context);
}
};
REGISTER_XLA_OP(
Name("NonMaxSuppressionV3").CompileTimeConstantInput("max_output_size"),
NonMaxSuppressionV3Op);
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
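// Shape-inference tests: INFER_OK checks the inferred output shapes for the
// given input shapes, INFER_ERROR checks the expected failure message.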
TEST(ImageOpsTest, SampleDistortedBoundingBox_ShapeFn) {
ShapeInferenceTestOp op("SampleDistortedBoundingBox");
INFER_OK(op, "?;?", "[3];[3];[1,1,4]");
}
TEST(ImageOpsTest, Resize_ShapeFn) {
for (const char* op_name : {"ResizeArea", "ResizeBicubic", "ResizeBilinear",
"ResizeNearestNeighbor"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
}
TEST(ImageOpsTest, DecodeGif) {
ShapeInferenceTestOp op("DecodeGif");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[?,?,?,3]");
INFER_OK(op, "[]", "[?,?,?,3]");
}
TEST(ImageOpTest, DecodeImage) {
ShapeInferenceTestOp op("DecodeImage");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", false)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", true)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
TEST(ImageOpsTest, DecodeImage_ShapeFn) {
for (const char* op_name : {"DecodeJpeg", "DecodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
}
TEST(ImageOpsTest, DecodeAndCropJpeg_ShapeFn) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[];[]");
}
TEST(ImageOpsTest, DecodeAndCropJpeg_InvalidCropWindow) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
}
TEST(ImageOpsTest, EncodeImage_ShapeFn) {
for (const char* op_name : {"EncodeJpeg"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
}
}
TEST(ImageOpsTest, BatchedEncodeImage_ShapeFn) {
for (const char* op_name : {"EncodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
INFER_OK(op, "[?,1,?,3]", "[d0_0]");
INFER_OK(op, "[4,5,1,?,3]", "[d0_0,d0_1]");
}
}
TEST(ImageOpsTest, ExtractJpegShape_ShapeFn) {
ShapeInferenceTestOp op("ExtractJpegShape");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[3]");
}
TEST(ImageOpsTest, Colorspace_ShapeFn) {
for (const char* op_name : {"HSVToRGB", "RGBToHSV"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[1,2,4]");
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[1,2,?]", "[d0_0,d0_1,3]");
INFER_OK(op, "?", "?");
}
}
TEST(ImageOpsTest, ExtractGlimpse_ShapeFn) {
ShapeInferenceTestOp op("ExtractGlimpse");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "ExtractGlimpse")
.Input({"input", 0, DT_FLOAT})
.Input({"size", 1, DT_INT32})
.Input({"offsets", 2, DT_FLOAT})
.Attr("uniform_noise", true)
.Attr("noise", "")
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[];?");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2,3]");
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,20,30,d0_3]");
INFER_OK(op, "[?,?,3,?];[2];[1,?]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2];[1,?]", "[d0_0|d2_0,20,30,d_0|d0_3]");
INFER_ERROR("Dimensions must be equal, but are 10 and 1", op,
"[10,?,?,?];?;[1,2]");
}
TEST(ImageOpsTest, CropAndResize_ShapeFn) {
ShapeInferenceTestOp op("CropAndResize");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;[1,2];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_ERROR("Dimension must be 2 but is 1", op, "?;?;?;[1]");
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[3] = &size_tensor;
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2,4];?;[2]", "[d1_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];?;[2];[2]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4];[?];[2]", "[d1_0|d3_0,20,30,d0_3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "?;[2,?];[1];?");
INFER_ERROR("Dimension must be 4 but is 3", op, "?;[?,3];?;?");
}
TEST(ImageOpsTest, ResizeNearestNeighborGrad_ShapeFn) {
ShapeInferenceTestOp op("ResizeNearestNeighborGrad");
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]")
INFER_ERROR("Dimension must be 2 but is 1", op, "?;[1]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
TEST(ImageOpsTest, CropAndResizeGradImage_ShapeFn) {
ShapeInferenceTestOp op("CropAndResizeGradImage");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_OK(op, "?;?;?;?", "[?,?,?,?]");
Tensor image_size = test::AsTensor<int32>({10, 20, 30, 40});
op.input_tensors[3] = &image_size;
INFER_OK(op, "?;?;?;[1]", "[10, 20, 30, 40]");
}
TEST(ImageOpsTest, RandomCrop_ShapeFn) {
ShapeInferenceTestOp op("RandomCrop");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 3", op, "[1,2];?");
INFER_ERROR("must be equal", op, "?;[3]");
INFER_ERROR("must be equal", op, "?;[1,2]");
INFER_OK(op, "[?,?,?];[2]", "[?,?,d0_2]");
Tensor size = test::AsTensor<int64_t>({10, 20});
op.input_tensors[1] = &size;
INFER_OK(op, "[?,?,?];[2]", "[10,20,d0_2]");
}
TEST(ImageOpsTest, QuantizedResizeBilinear_ShapeFn) {
ShapeInferenceTestOp op("QuantizedResizeBilinear");
op.input_tensors.resize(4);
NodeDefBuilder builder =
NodeDefBuilder("test", "QuantizedResizeBilinear")
.Input(NodeDefBuilder::NodeOut{"images", 0, DT_QINT32})
.Input(NodeDefBuilder::NodeOut{"size", 0, DT_INT32})
.Input(NodeDefBuilder::NodeOut{"min", 0, DT_FLOAT})
.Input(NodeDefBuilder::NodeOut{"max", 0, DT_FLOAT})
.Attr("T", DT_QINT32)
.Attr("Toutput", DT_QINT32);
TF_ASSERT_OK(builder.Finalize(&op.node_def));
INFER_OK(op, "[1,?,3,?];[2];[];[]",
"[d0_0,?,?,d0_3];[];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[?];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[];[?]");
const Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors.at(1) = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];[];[]", "[d0_0,20,30,d0_3];[];[]");
}
TEST(ImageOpsTest, DrawBoundingBoxes_ShapeFn) {
ShapeInferenceTestOp op("DrawBoundingBoxes");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 4", op, "[1,?,3];?");
INFER_ERROR("should be either 1 (GRY), 3 (RGB), or 4 (RGBA)", op,
"[1,?,?,5];?");
INFER_ERROR("must be rank 3", op, "[1,?,?,4];[1,4]");
INFER_ERROR("Dimension must be 4", op, "[1,?,?,4];[1,2,2]");
INFER_OK(op, "[4,?,?,4];?", "in0");
INFER_OK(op, "[?,?,?,?];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,4]", "in0");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/image_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/image_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f41932d8-4e64-4b72-807e-e6ff75eaddc7 | cpp | tensorflow/tensorflow | colorspace_op | tensorflow/core/kernels/image/colorspace_op.cc | tensorflow/core/kernels/image/colorspace_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/colorspace_op.h"
#include <algorithm>
#include <cmath>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
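// RGBToHSV/HSVToRGB kernels: validate a trailing dimension of 3 channels and
// apply the corresponding functor over the flattened pixels; RGBToHSV also
// allocates a scratch tensor holding the per-pixel value range.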
template <typename Device, typename T>
class RGBToHSVOp : public OpKernel {
public:
explicit RGBToHSVOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
Tensor trange;
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({input_data.dimension(0)}),
&trange));
typename TTypes<T, 1>::Tensor range(trange.tensor<T, 1>());
functor::RGBToHSV<Device, T>()(context->eigen_device<Device>(), input_data,
range, output_data);
}
};
template <typename Device, typename T>
class HSVToRGBOp : public OpKernel {
public:
explicit HSVToRGBOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
functor::HSVToRGB<Device, T>()(context->eigen_device<Device>(), input_data,
output_data);
}
};
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
RGBToHSVOp<CPUDevice, T>); \
template class RGBToHSVOp<CPUDevice, T>; \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
HSVToRGBOp<CPUDevice, T>); \
template class HSVToRGBOp<CPUDevice, T>;
TF_CALL_float(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
TF_CALL_half(REGISTER_CPU);
TF_CALL_bfloat16(REGISTER_CPU);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU(T) \
template <> \
void RGBToHSV<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 1>::Tensor range, TTypes<T, 2>::Tensor output_data); \
extern template struct RGBToHSV<GPUDevice, T>; \
template <> \
void HSVToRGB<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 2>::Tensor output_data); \
extern template struct HSVToRGB<GPUDevice, T>;
TF_CALL_float(DECLARE_GPU);
TF_CALL_double(DECLARE_GPU);
}
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
RGBToHSVOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
HSVToRGBOp<GPUDevice, T>);
TF_CALL_float(REGISTER_GPU);
TF_CALL_double(REGISTER_GPU);
#endif
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
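// Round-trip checks for RGB<->HSV on hand-computed pixels, covering each
// channel being the maximum as well as a negative hue difference.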
template <typename T>
class RGBToHSVOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("rgb_to_hsv_op", "RGBToHSV")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 0.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.5, .5, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {1, 1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 1.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.8f, .4f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.2f, .8f, .4f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.4f, .2f, .8f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, .1f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
template <typename T>
class HSVToRGBOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("hsv_to_rgb_op", "HSVToRGB")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, 0, 0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.5, .5, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {1, 1, 1});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.8, .4, .2});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.2, .8, .4});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.0;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.4, .2, .8});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, .1f, .2f});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
#define TEST_COLORSPACE(test, dt) \
TEST_F(test, CheckBlack) { \
MakeOp(dt); \
CheckBlack(dt); \
} \
TEST_F(test, CheckGray) { \
MakeOp(dt); \
CheckGray(dt); \
} \
TEST_F(test, CheckWhite) { \
MakeOp(dt); \
CheckWhite(dt); \
} \
TEST_F(test, CheckRedMax) { \
MakeOp(dt); \
CheckRedMax(dt); \
} \
TEST_F(test, CheckGreenMax) { \
MakeOp(dt); \
CheckGreenMax(dt); \
} \
TEST_F(test, CheckBlueMax) { \
MakeOp(dt); \
CheckBlueMax(dt); \
} \
TEST_F(test, CheckNegativeDifference) { \
MakeOp(dt); \
CheckNegativeDifference(dt); \
}
typedef RGBToHSVOpTest<float> rgb_to_hsv_float;
typedef RGBToHSVOpTest<double> rgb_to_hsv_double;
TEST_COLORSPACE(rgb_to_hsv_float, DT_FLOAT);
TEST_COLORSPACE(rgb_to_hsv_double, DT_DOUBLE);
typedef HSVToRGBOpTest<float> hsv_to_rgb_float;
typedef HSVToRGBOpTest<double> hsv_to_rgb_double;
TEST_COLORSPACE(hsv_to_rgb_float, DT_FLOAT);
TEST_COLORSPACE(hsv_to_rgb_double, DT_DOUBLE);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/colorspace_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/colorspace_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f688bb0a-8221-41a2-a40f-511fe2e7b5d6 | cpp | tensorflow/tensorflow | encode_jpeg_op | tensorflow/core/kernels/image/encode_jpeg_op.cc | tensorflow/core/kernels/image/encode_jpeg_op_test.cc | #include <memory>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
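// Encodes a 3-D uint8 image tensor as a JPEG-encoded string scalar.
// Compression settings come from kernel attributes; when the "format"
// attribute is empty, grayscale vs. RGB is inferred from the channel count.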
class EncodeJpegOp : public OpKernel {
public:
explicit EncodeJpegOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("format", &format_));
if (format_.empty()) {
flags_.format = static_cast<jpeg::Format>(0);
} else if (format_ == "grayscale") {
flags_.format = jpeg::FORMAT_GRAYSCALE;
} else if (format_ == "rgb") {
flags_.format = jpeg::FORMAT_RGB;
} else {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"format must be '', grayscale or rgb, got ", format_));
}
OP_REQUIRES_OK(context, context->GetAttr("quality", &flags_.quality));
OP_REQUIRES(context, 0 <= flags_.quality && flags_.quality <= 100,
errors::InvalidArgument("quality must be in [0,100], got ",
flags_.quality));
OP_REQUIRES_OK(context,
context->GetAttr("progressive", &flags_.progressive));
OP_REQUIRES_OK(
context, context->GetAttr("optimize_size", &flags_.optimize_jpeg_size));
OP_REQUIRES_OK(context, context->GetAttr("chroma_downsampling",
&flags_.chroma_downsampling));
string density_unit;
OP_REQUIRES_OK(context, context->GetAttr("density_unit", &density_unit));
if (density_unit == "in") {
flags_.density_unit = 1;
} else if (density_unit == "cm") {
flags_.density_unit = 2;
} else {
OP_REQUIRES(context, false,
errors::InvalidArgument("density_unit must be 'in' or 'cm'",
density_unit));
}
OP_REQUIRES_OK(context, context->GetAttr("x_density", &flags_.x_density));
OP_REQUIRES_OK(context, context->GetAttr("y_density", &flags_.y_density));
OP_REQUIRES_OK(context, context->GetAttr("xmp_metadata", &xmp_metadata_));
flags_.xmp_metadata = xmp_metadata_;
}
void Compute(OpKernelContext* context) override {
const Tensor& image = context->input(0);
OP_REQUIRES(context, image.dims() == 3,
errors::InvalidArgument("image must be 3-dimensional",
image.shape().DebugString()));
OP_REQUIRES(
context,
FastBoundsCheck(image.NumElements(), std::numeric_limits<int32>::max()),
errors::InvalidArgument(
"Cannot encode images with >= max int32 elements"));
const int32_t dim_size0 = static_cast<int32>(image.dim_size(0));
const int32_t dim_size1 = static_cast<int32>(image.dim_size(1));
const int32_t dim_size2 = static_cast<int32>(image.dim_size(2));
int channels;
jpeg::CompressFlags adjusted_flags = flags_;
if (flags_.format == 0) {
channels = dim_size2;
if (channels == 1) {
adjusted_flags.format = jpeg::FORMAT_GRAYSCALE;
} else if (channels == 3) {
adjusted_flags.format = jpeg::FORMAT_RGB;
} else {
OP_REQUIRES(
context, false,
errors::InvalidArgument("image must have 1 or 3 channels, got ",
image.shape().DebugString()));
}
} else {
if (flags_.format == jpeg::FORMAT_GRAYSCALE) {
channels = 1;
} else {
channels = 3;
}
OP_REQUIRES(context, channels == dim_size2,
errors::InvalidArgument("format ", format_, " expects ",
channels, " channels, got ",
image.shape().DebugString()));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES(context,
jpeg::Compress(image.flat<uint8>().data(), dim_size1, dim_size0,
adjusted_flags, &output->scalar<tstring>()()),
errors::Internal("JPEG encoding failed"));
}
private:
string format_;
string xmp_metadata_;
jpeg::CompressFlags flags_;
};
REGISTER_KERNEL_BUILDER(Name("EncodeJpeg").Device(DEVICE_CPU), EncodeJpegOp);
class EncodeJpegVariableQualityOp : public OpKernel {
public:
explicit EncodeJpegVariableQualityOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& image = context->input(0);
OP_REQUIRES(context, image.dims() == 3,
errors::InvalidArgument("image must be 3-dimensional",
image.shape().DebugString()));
OP_REQUIRES(
context,
FastBoundsCheck(image.NumElements(), std::numeric_limits<int32>::max()),
errors::InvalidArgument(
"Cannot encode images with >= max int32 elements"));
const int32_t dim_size0 = static_cast<int32>(image.dim_size(0));
const int32_t dim_size1 = static_cast<int32>(image.dim_size(1));
const int32_t dim_size2 = static_cast<int32>(image.dim_size(2));
jpeg::CompressFlags adjusted_flags;
const Tensor& quality = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(quality.shape()),
errors::InvalidArgument("quality must be scalar: ",
quality.shape().DebugString()));
adjusted_flags.quality = quality.scalar<int>()();
OP_REQUIRES(context,
0 <= adjusted_flags.quality && adjusted_flags.quality <= 100,
errors::InvalidArgument("quality must be in [0,100], got ",
adjusted_flags.quality));
int channels;
channels = dim_size2;
if (channels == 1) {
adjusted_flags.format = jpeg::FORMAT_GRAYSCALE;
} else if (channels == 3) {
adjusted_flags.format = jpeg::FORMAT_RGB;
} else {
OP_REQUIRES(
context, false,
errors::InvalidArgument("image must have 1 or 3 channels, got ",
image.shape().DebugString()));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES(context,
jpeg::Compress(image.flat<uint8>().data(), dim_size1, dim_size0,
adjusted_flags, &output->scalar<tstring>()()),
errors::Internal("JPEG encoding failed"));
}
};
REGISTER_KERNEL_BUILDER(Name("EncodeJpegVariableQuality").Device(DEVICE_CPU),
EncodeJpegVariableQualityOp);
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using EncodeJpegWithVariableQualityTest = OpsTestBase;
TEST_F(EncodeJpegWithVariableQualityTest, FailsForInvalidQuality) {
TF_ASSERT_OK(NodeDefBuilder("encode_op", "EncodeJpegVariableQuality")
.Input(FakeInput(DT_UINT8))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<uint8>(TensorShape({2, 2, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
AddInputFromArray<int32>(TensorShape({}), {200});
Status status = RunOpKernel();
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StartsWith(status.message(), "quality must be in [0,100]"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/encode_jpeg_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/encode_jpeg_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
14261d17-de12-429d-884e-9463414a1834 | cpp | tensorflow/tensorflow | resize_bilinear_op | tensorflow/core/kernels/image/resize_bilinear_op.cc | tensorflow/core/kernels/image/resize_bilinear_op_test.cc | #define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/image/resize_bilinear_op.h"
#ifdef __SSE4_1__
#include <xmmintrin.h>
#endif
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class ResizeBilinearOp : public OpKernel {
public:
explicit ResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(
context->input(0).tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
functor::ResizeBilinear<Device, T>()(
context->eigen_device<Device>(), image_data, st.height_scale,
st.width_scale, half_pixel_centers_, output_data);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace {
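// Per-output-coordinate interpolation data: the two input indices to blend
// and the fractional weight between them.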
struct CachedInterpolation {
int64_t lower;
int64_t upper;
float lerp;
};
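// Fills the interpolation table for one axis; the Scaler maps output
// coordinates to input coordinates (half-pixel or legacy convention).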
template <typename Scaler>
inline void compute_interpolation_weights(const Scaler scaler,
const int64_t out_size,
const int64_t in_size,
const float scale,
CachedInterpolation* interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
for (int64_t i = out_size - 1; i >= 0; --i) {
const float in = scaler(i, scale);
const float in_f = std::floor(in);
interpolation[i].lower =
std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0));
interpolation[i].upper =
std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1);
interpolation[i].lerp = in - in_f;
}
}
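// Standard bilinear blend of four neighboring pixel values.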
inline float compute_lerp(const float top_left, const float top_right,
const float bottom_left, const float bottom_right,
const float x_lerp, const float y_lerp) {
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
return top + (bottom - top) * y_lerp;
}
#ifdef __SSE4_1__
inline __m128 compute_lerp_v(const __m128 top_left, const __m128 top_right,
const __m128 bottom_left,
const __m128 bottom_right, const __m128 x_lerp,
const __m128 y_lerp) {
const __m128 top =
_mm_add_ps(top_left, _mm_mul_ps(_mm_sub_ps(top_right, top_left), x_lerp));
const __m128 bottom = _mm_add_ps(
bottom_left, _mm_mul_ps(_mm_sub_ps(bottom_right, bottom_left), x_lerp));
return _mm_add_ps(top, _mm_mul_ps(_mm_sub_ps(bottom, top), y_lerp));
}
#endif
template <typename T>
void ResizeLineChannels(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y, const int channels) {
for (int64_t x = 0; x < out_width; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const float xs_lerp = xs[x].lerp;
for (int c = 0; c < channels; ++c) {
const float top_left(ys_input_lower_ptr[xs_lower + c]);
const float top_right(ys_input_lower_ptr[xs_upper + c]);
const float bottom_left(ys_input_upper_ptr[xs_lower + c]);
const float bottom_right(ys_input_upper_ptr[xs_upper + c]);
out_y[x * channels + c] = compute_lerp(top_left, top_right, bottom_left,
bottom_right, xs_lerp, ys_lerp);
}
}
}
#ifdef __SSE4_1__
template <typename T>
inline __m128 load_3xfloat_v(T* values) {
return _mm_set_ps(0.0f, static_cast<float>(values[2]),
static_cast<float>(values[1]),
static_cast<float>(values[0]));
}
template <>
inline __m128 load_3xfloat_v(float* values) {
return _mm_loadu_ps(values);
}
template <typename T>
void ResizeLine3ChannelsVector(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y) {
const __m128 ys_lerp_v = _mm_set1_ps(ys_lerp);
int64_t x = 0;
for (x = 0; x < out_width - 1; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const __m128 xs_lerp_v = _mm_set1_ps(xs[x].lerp);
const __m128 top_left_v = load_3xfloat_v(ys_input_lower_ptr + xs_lower);
const __m128 top_right_v = load_3xfloat_v(ys_input_lower_ptr + xs_upper);
const __m128 bottom_left_v = load_3xfloat_v(ys_input_upper_ptr + xs_lower);
const __m128 bottom_right_v = load_3xfloat_v(ys_input_upper_ptr + xs_upper);
_mm_storeu_ps(out_y + x * 3,
compute_lerp_v(top_left_v, top_right_v, bottom_left_v,
bottom_right_v, xs_lerp_v, ys_lerp_v));
}
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs + out_width - 1,
ys_lerp, 1, out_y + (out_width - 1) * 3, 3);
}
#endif
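// Resizes a batch of images using the precomputed interpolation tables; the
// three-channel case takes a specialized (optionally SSE-vectorized) path.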
template <typename T>
void resize_image(
typename TTypes<T, 4>::ConstTensor images, const int batch_size,
const int64_t in_height, const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) TF_ATTRIBUTE_NOINLINE;
template <typename T>
void resize_image(typename TTypes<T, 4>::ConstTensor images,
const int batch_size, const int64_t in_height,
const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs_vec,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) {
const int64_t in_row_size = in_width * channels;
const int64_t in_batch_num_values = in_height * in_row_size;
const int64_t out_row_size = out_width * channels;
const T* input_b_ptr = images.data();
const CachedInterpolation* xs = xs_vec.data();
if (channels == 3) {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
#ifdef __SSE4_1__
ResizeLine3ChannelsVector(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr);
#else
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, 3);
#endif
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
} else {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, channels);
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
}
}
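// Copies a float intermediate into the requested output type; the GPU
// specialization routes through CastFunctor so the cast runs on the device.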
template <typename Device, typename T>
struct CastFloatTo {
void operator()(const Device& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
output.device(d) = input.template cast<T>();
}
};
template <typename T>
struct CastFloatTo<GPUDevice, T> {
void operator()(const GPUDevice& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
functor::CastFunctor<GPUDevice, T, float> cast;
cast(d, output, input);
}
};
}  // namespace
namespace functor {
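// CPU specialization of the forward ResizeBilinear functor: precomputes the
// per-row and per-column interpolation weights, then fills the output. Falls
// back to a plain cast when the output size matches the input size.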
template <typename T>
struct ResizeBilinear<CPUDevice, T> {
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor images,
const float height_scale, const float width_scale,
bool half_pixel_centers,
typename TTypes<float, 4>::Tensor output) {
const int batch_size = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
if (out_height == in_height && out_width == in_width) {
output = images.template cast<float>();
return;
}
std::vector<CachedInterpolation> ys(out_height + 1);
std::vector<CachedInterpolation> xs(out_width + 1);
if (half_pixel_centers) {
compute_interpolation_weights(HalfPixelScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(HalfPixelScaler(), out_width, in_width,
width_scale, xs.data());
} else {
compute_interpolation_weights(LegacyScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(LegacyScaler(), out_width, in_width,
width_scale, xs.data());
}
for (int i = 0; i < xs.size(); ++i) {
xs[i].lower *= channels;
xs[i].upper *= channels;
}
resize_image<T>(images, batch_size, in_height, in_width, out_height,
out_width, channels, xs, ys, output);
}
};
}  // namespace functor
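// Gradient op for ResizeBilinear. For half and bfloat16 outputs the gradient
// is accumulated into a float temporary and then cast to the output type.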
template <typename Device, typename T>
class ResizeBilinearOpGrad : public OpKernel {
public:
explicit ResizeBilinearOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerGradientState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad =
context->input(0).tensor<float, 4>();
if (!std::is_same<T, Eigen::half>::value &&
!std::is_same<T, Eigen::bfloat16>::value) {
typename TTypes<T, 4>::Tensor output_grad(st.output->tensor<T, 4>());
functor::ResizeBilinearGrad<Device, T>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad);
} else {
Tensor output_grad;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT, st.output->shape(), &output_grad));
functor::ResizeBilinearGrad<Device, float>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad.tensor<float, 4>());
const Tensor& output_grad_const = output_grad;
CastFloatTo<Device, T>{}(context->template eigen_device<Device>(),
output_grad_const.template flat<float>(),
st.output->template flat<T>());
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace functor {
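// CPU implementation of ResizeBilinearGrad: scatters each incoming gradient
// value back to the four source pixels it was interpolated from, weighted by
// the lerp coefficients.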
template <typename T>
struct ResizeBilinearGrad<CPUDevice, T> {
template <typename Scaler>
void ResizeGradCore(const Scaler& scaler,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output_grad) {
const Eigen::Index batch = output_grad.dimension(0);
const Eigen::Index original_height = output_grad.dimension(1);
const Eigen::Index original_width = output_grad.dimension(2);
const Eigen::Index channels = output_grad.dimension(3);
const Eigen::Index resized_height = input_grad.dimension(1);
const Eigen::Index resized_width = input_grad.dimension(2);
output_grad.setZero();
for (Eigen::Index b = 0; b < batch; ++b) {
for (Eigen::Index y = 0; y < resized_height; ++y) {
const float in_y = scaler(y, height_scale);
const Eigen::Index top_y_index =
std::max(static_cast<Eigen::Index>(floorf(in_y)),
static_cast<Eigen::Index>(0));
const Eigen::Index bottom_y_index = std::min(
static_cast<Eigen::Index>(ceilf(in_y)), original_height - 1);
const float y_lerp = in_y - floorf(in_y);
const float inverse_y_lerp = (1.0f - y_lerp);
for (Eigen::Index x = 0; x < resized_width; ++x) {
const float in_x = scaler(x, width_scale);
const Eigen::Index left_x_index =
std::max(static_cast<Eigen::Index>(floorf(in_x)),
static_cast<Eigen::Index>(0));
const Eigen::Index right_x_index = std::min(
static_cast<Eigen::Index>(ceilf(in_x)), original_width - 1);
const float x_lerp = in_x - floorf(in_x);
const float inverse_x_lerp = (1.0f - x_lerp);
for (Eigen::Index c = 0; c < channels; ++c) {
output_grad(b, top_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * inverse_x_lerp);
output_grad(b, top_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * x_lerp);
output_grad(b, bottom_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * inverse_x_lerp);
output_grad(b, bottom_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * x_lerp);
}
}
}
}
}
void operator()(const CPUDevice& d,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad) {
if (half_pixel_centers) {
return ResizeGradCore(HalfPixelScaler(), input_grad, height_scale,
width_scale, output_grad);
} else {
return ResizeGradCore(LegacyScaler(), input_grad, height_scale,
width_scale, output_grad);
}
}
};
}  // namespace functor
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<CPUDevice, T>);
TF_CALL_half(REGISTER_GRAD_KERNEL);
TF_CALL_float(REGISTER_GRAD_KERNEL);
TF_CALL_double(REGISTER_GRAD_KERNEL);
TF_CALL_bfloat16(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
enum class TestDevice { CPU, GPU };
class ResizeBilinearOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeBilinearOpTestBase()
: align_corners_(false), half_pixel_centers_(false) {}
void SetUp() override {
if (GetParam() == TestDevice::GPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(allocator(), DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
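// Reference bilinear resize used to validate the kernel output; honors the
// half_pixel_centers_ setting of the test fixture.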
void ResizeBilinearBaseline(TTypes<float, 4>::ConstTensor images,
TTypes<float, 4>::Tensor output) {
const int batch = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
ASSERT_EQ(batch, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
for (int b = 0; b < batch; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float in_y =
half_pixel_centers_
? (static_cast<float>(y) + 0.5f) * height_scale - 0.5f
: y * height_scale;
const int64_t top_y_index = std::max(static_cast<int64_t>(floorf(in_y)),
static_cast<int64_t>(0));
const int64_t bottom_y_index =
std::min(static_cast<int64_t>(ceilf(in_y)), in_height - 1);
const float y_lerp = in_y - std::floor(in_y);
for (int64_t x = 0; x < out_width; ++x) {
const float in_x =
half_pixel_centers_
? (static_cast<float>(x) + 0.5f) * width_scale - 0.5f
: x * width_scale;
const int64_t left_x_index = std::max(
static_cast<int64_t>(floorf(in_x)), static_cast<int64_t>(0));
const int64_t right_x_index =
std::min(static_cast<int64_t>(ceilf(in_x)), in_width - 1);
const float x_lerp = in_x - std::floor(in_x);
for (int c = 0; c < channels; ++c) {
const float top_left = images(b, top_y_index, left_x_index, c);
const float top_right = images(b, top_y_index, right_x_index, c);
const float bottom_left =
images(b, bottom_y_index, left_x_index, c);
const float bottom_right =
images(b, bottom_y_index, right_x_index, c);
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
output(b, y, x, c) = top + (bottom - top) * y_lerp;
}
}
}
}
}
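// Resizes a random image with the kernel under test and compares the result
// against the baseline within a small tolerance.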
void TestResize(int batch_size, int input_width, int input_height,
int channels, int output_width, int output_height) {
const TensorShape shape({batch_size, input_width, input_height, channels});
const Tensor* input = SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(),
TensorShape({batch_size, output_width, output_height, channels})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
test::ExpectClose(*expected, *GetOutput(0), 4e-5);
}
void RunManyRandomTests(int channels) {
for (int batch_size : {1, 2, 5}) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
TestResize(batch_size, in_w, in_h, channels, target_width,
target_height);
}
}
}
}
}
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeBilinearOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpTest() {}
};
class ResizeBilinearHalfPixelCentersOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearHalfPixelCentersOpTest() { half_pixel_centers_ = true; }
};
class ResizeBilinearOpAlignCornersTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpAlignCornersTest() { align_corners_ = true; }
};
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes1Channel) {
RunManyRandomTests(1);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes3Channels) {
RunManyRandomTests(3);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes4Channels) {
RunManyRandomTests(4);
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinearRandom2x2To1x1) {
const Tensor* input = SetRandomImageInput(TensorShape({1, 2, 2, 1}));
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(), TensorShape({1, 1, 1, 1})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
EXPECT_EQ(input->flat<float>()(0), output->flat<float>()(0));
test::ExpectClose(*expected, *output);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 5.0f / 3, 2,
7.0f / 3, 3, 10.0f / 3,
3, 11.0f / 3, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2,
2, 2.5, 3,
3, 3.5, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2.5,
5.5, 7});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To4x4) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.75, 2.5, 3,
3.25, 4, 4.75, 5.25,
5.5, 6.25, 7, 7.5,
7, 7.75, 8.5, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 7.0f/3, 11.0f/3,
19.0f/3, 23.0f/3, 27.0f/3,
35.0f/3, 39.0f/3, 43.0f/3});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestDownsamples) {
TestResize(4, 298, 297, 3, 61, 71);
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestUpsamples) {
TestResize(4, 61, 71, 3, 298, 297);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 2.5, 4,
7, 8.5, 10,
13, 14.5, 16});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3Batch2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 1}), {1, 2, 3, 4, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4,
1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2x2To3x3x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 2}),
{1, -1, 2, -2, 3, -3, 4, -4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 2}));
test::FillValues<float>(&expected,
{
1, -1,
5.0f/3, -5.0f/3,
2, -2,
7.0f/3, -7.0f/3,
3, -3,
10.0f/3, -10.0f/3,
3, -3,
11.0f/3, -11.0f/3,
4, -4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2, 2,
2, 2.5, 3, 3,
3, 3.5, 4, 4,
3, 3.5, 4, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, Test1_1c) { TestResize(1, 183, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test1_3c) { TestResize(1, 183, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_1c) { TestResize(1, 141, 186, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_3c) { TestResize(1, 141, 186, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_1c) { TestResize(1, 749, 603, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_3c) { TestResize(1, 749, 603, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_1c) { TestResize(1, 299, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_3c) { TestResize(1, 299, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_1c) { TestResize(1, 298, 297, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_3c) { TestResize(1, 298, 297, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_1c) { TestResize(1, 304, 303, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_3c) { TestResize(1, 304, 303, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, TestInvalidOutputSize) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "output dimensions must be positive"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidInputShape) {
AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "input must be 4-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeDim) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must be 1-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeElements) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {4, 4, 1});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must have two elements"))
<< s;
}
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestCpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestCpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestCpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::CPU));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestGpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestGpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestGpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::GPU));
#endif
class ResizeBM : public ResizeBilinearOpTest {
public:
void TestBody() override {}
void SetUpBenchmark(int input_width, int input_height, int num_channels,
int output_width, int output_height) {
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const TensorShape shape(
{ 1, input_width, input_height, num_channels});
SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
}
using ResizeBilinearOpTest::RunOpKernel;
};
#ifdef PLATFORM_GOOGLE
void BM_Resize(benchmark::State& state) {
ResizeBM bench;
bench.SetUpBenchmark(640, 480, 3, 1024, 768);
for (const auto _ : state) {
CHECK(bench.RunOpKernel().ok());
}
}
BENCHMARK(BM_Resize);
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bilinear_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bilinear_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f63ef80-aa42-46d4-82fe-0e4fc4d801cd | cpp | tensorflow/tensorflow | crop_and_resize_op | tensorflow/core/kernels/image/crop_and_resize_op.cc | tensorflow/core/kernels/image/crop_and_resize_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/crop_and_resize_op.h"
#include <functional>
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/work_sharder.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
#if GOOGLE_CUDA
#include "xla/stream_executor/gpu/scoped_activate_context.h"
using stream_executor::gpu::ScopedActivateContext;
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
using stream_executor::gpu::ScopedActivateContext;
#endif
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
using Callback = std::function<void()>;
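// Validates that boxes has shape [num_boxes, 4] and box_index has shape
// [num_boxes]; returns the number of boxes on success.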
static inline Status ParseAndCheckBoxSizes(const Tensor& boxes,
const Tensor& box_index,
int* num_boxes) {
if (boxes.NumElements() == 0 && box_index.NumElements() == 0) {
*num_boxes = 0;
return absl::OkStatus();
}
if (boxes.dims() != 2) {
return errors::InvalidArgument("boxes must be 2-D",
boxes.shape().DebugString());
}
*num_boxes = boxes.dim_size(0);
if (boxes.dim_size(1) != 4) {
return errors::InvalidArgument("boxes must have 4 columns");
}
if (box_index.dims() != 1) {
return errors::InvalidArgument("box_index must be 1-D",
box_index.shape().DebugString());
}
if (box_index.dim_size(0) != *num_boxes) {
return errors::InvalidArgument("box_index has incompatible shape");
}
return absl::OkStatus();
}
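// Runs `compute` only if every entry of box_index lies in [0, batch_size).
// The CPU overload checks synchronously; the GPU overload (defined further
// below) validates on the device and defers the check to a host callback.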
template <typename Device>
inline void RunIfBoxIndexIsValid(
OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, const Callback& compute, const Callback& done);
template <>
inline void RunIfBoxIndexIsValid<CPUDevice>(
OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, const Callback& compute, const Callback& done) {
const int num_boxes = box_index.dimension(0);
for (int b = 0; b < num_boxes; ++b) {
OP_REQUIRES_ASYNC(
context, FastBoundsCheck(box_index(b), batch_size),
errors::OutOfRange("box_index has values outside [0, batch_size)"),
done);
}
if (compute) {
compute();
}
if (done) {
done();
}
}
}  // namespace
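// Extracts crops described by normalized box coordinates, resizes each crop to
// a common crop_size using bilinear or nearest-neighbor sampling, and fills
// samples that fall outside the image with extrapolation_value.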
template <typename Device, typename T>
class CropAndResizeOp : public AsyncOpKernel {
public:
explicit CropAndResizeOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("method", &method_));
OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest",
errors::InvalidArgument(
"method must be 'bilinear' or 'nearest'", method_));
OP_REQUIRES_OK(context, context->GetAttr("extrapolation_value",
&extrapolation_value_));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& image = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const Tensor& crop_size = context->input(3);
OP_REQUIRES_ASYNC(context, image.dims() == 4,
errors::InvalidArgument("input image must be 4-D",
image.shape().DebugString()),
done);
const int batch_size = image.dim_size(0);
const int image_height = image.dim_size(1);
const int image_width = image.dim_size(2);
const int depth = image.dim_size(3);
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(
context, boxes.dims() == 2,
absl::InvalidArgumentError(absl::StrCat("boxes must be 2-D, got: ",
boxes.shape().DebugString())),
done);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsVector(box_index.shape()),
errors::InvalidArgument("box_indices must be rank 1 but is shape ",
box_index.shape().DebugString()),
done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(context, crop_size.dims() == 1,
errors::InvalidArgument("crop_size must be 1-D",
crop_size.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(
context, crop_size.dim_size(0) == 2,
errors::InvalidArgument("crop_size must have two elements",
crop_size.shape().DebugString()),
done);
auto crop_size_vec = crop_size.vec<int32>();
const int crop_height = internal::SubtleMustCopy(crop_size_vec(0));
const int crop_width = internal::SubtleMustCopy(crop_size_vec(1));
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("crop dimensions must be positive"), done);
TensorShape shape;
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(num_boxes), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_height), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_width), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done);
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output),
done);
auto compute_callback = [this, context, output]() {
const Tensor& image = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const bool status = functor::CropAndResize<Device, T>()(
context, image.tensor<T, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), method_, extrapolation_value_,
output->tensor<float, 4>());
if (!status) {
context->SetStatus(
errors::Internal("Failed to launch CropAndResizeKernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
private:
float extrapolation_value_;
string method_;
};
namespace functor {
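// CPU sampling loop for CropAndResize: rejects boxes with non-finite
// coordinates and shards the per-box work across the intra-op thread pool.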
template <typename T>
struct CropAndResize<CPUDevice, T> {
bool operator()(OpKernelContext* context,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
const string& method_name, float extrapolation_value,
typename TTypes<float, 4>::Tensor crops) {
const int batch_size = image.dimension(0);
const int image_height = image.dimension(1);
const int image_width = image.dimension(2);
const int num_boxes = crops.dimension(0);
const int crop_height = crops.dimension(1);
const int crop_width = crops.dimension(2);
const int depth = crops.dimension(3);
const Eigen::Tensor<bool, 0, Eigen::RowMajor> only_finite_elements =
boxes.isfinite().all();
if (!only_finite_elements()) {
context->SetStatus(errors::InvalidArgument(
"Boxes contains at least one element that is not finite"));
return false;
}
auto CropAndResizePerBox = [&](int64_t start_box, int64_t limit_box) {
for (int b = start_box; b < limit_box; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
for (int x = 0; x < crop_width; ++x) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
}
continue;
}
if (method_name == "bilinear") {
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float top_left(static_cast<float>(
image(b_in, top_y_index, left_x_index, d)));
const float top_right(static_cast<float>(
image(b_in, top_y_index, right_x_index, d)));
const float bottom_left(static_cast<float>(
image(b_in, bottom_y_index, left_x_index, d)));
const float bottom_right(static_cast<float>(
image(b_in, bottom_y_index, right_x_index, d)));
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
crops(b, y, x, d) = top + (bottom - top) * y_lerp;
}
}
} else {
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
continue;
}
const int closest_x_index = roundf(in_x);
const int closest_y_index = roundf(in_y);
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = static_cast<float>(
image(b_in, closest_y_index, closest_x_index, d));
}
}
}
}
}
};
double cost_per_pixel =
depth * (Eigen::TensorOpCost::AddCost<float>() * 6 +
Eigen::TensorOpCost::MulCost<float>() * 3 +
Eigen::TensorOpCost::CastCost<T, float>() * 4) +
(Eigen::TensorOpCost::AddCost<float>() * 2 +
Eigen::TensorOpCost::AddCost<float>() * 3);
if (method_name == "nearest") {
cost_per_pixel = depth * Eigen::TensorOpCost::CastCost<T, float>() +
Eigen::TensorOpCost::AddCost<float>() * 4 +
Eigen::TensorOpCost::MulCost<float>() * 4;
}
const double cost_per_box = crop_height * crop_width * cost_per_pixel;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, num_boxes,
cost_per_box, CropAndResizePerBox);
return true;
}
};
}  // namespace functor
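// Computes the gradient of CropAndResize with respect to the input image,
// for either the bilinear or the nearest sampling method.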
template <typename Device, typename T>
class CropAndResizeGradImageOp : public AsyncOpKernel {
public:
explicit CropAndResizeGradImageOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("method", &method_));
OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest",
errors::InvalidArgument(
"method must be 'bilinear' or 'nearest'", method_));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const Tensor& image_size = context->input(3);
OP_REQUIRES_ASYNC(context, grads.dims() == 4,
errors::InvalidArgument("grads image must be 4-D",
grads.shape().DebugString()),
done);
const int crop_height = grads.dim_size(1);
const int crop_width = grads.dim_size(2);
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("grads dimensions must be positive"), done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(0) == num_boxes,
errors::InvalidArgument("boxes and grads have incompatible shape"),
done);
OP_REQUIRES_ASYNC(context, image_size.dims() == 1,
errors::InvalidArgument("image_size must be 1-D",
image_size.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, image_size.dim_size(0) == 4,
errors::InvalidArgument("image_size must have 4 elements",
image_size.shape().DebugString()),
done);
auto image_size_vec = image_size.vec<int32>();
const int batch_size = internal::SubtleMustCopy(image_size_vec(0));
const int image_height = internal::SubtleMustCopy(image_size_vec(1));
const int image_width = internal::SubtleMustCopy(image_size_vec(2));
const int depth = internal::SubtleMustCopy(image_size_vec(3));
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(3) == depth,
errors::InvalidArgument("image_size and grads are incompatible"), done);
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_ASYNC(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"Deterministic GPU implementation of CropAndResizeBackpropImage"
" not available."),
done);
}
TensorShape shape;
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(batch_size), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_height), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_width), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done);
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output),
done);
auto compute_callback = [this, context, output]() {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const bool status = functor::CropAndResizeBackpropImage<Device, T>()(
context, grads.tensor<float, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), output->tensor<T, 4>(), method_);
if (!status) {
context->SetStatus(errors::Internal(
"Failed to launch CropAndResizeBackpropImage kernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
private:
string method_;
};
namespace functor {
template <typename T>
struct CropAndResizeBackpropImage<CPUDevice, T> {
bool operator()(const OpKernelContext* context,
typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
typename TTypes<T, 4>::Tensor grads_image,
const string& method_name) {
const int batch_size = grads_image.dimension(0);
const int image_height = grads_image.dimension(1);
const int image_width = grads_image.dimension(2);
const int num_boxes = grads.dimension(0);
const int crop_height = grads.dimension(1);
const int crop_width = grads.dimension(2);
const int depth = grads.dimension(3);
grads_image.setZero();
auto CropAndResizeBackImgPerBox = [&](int64_t start_box,
int64_t limit_box) {
for (int b = start_box; b < limit_box; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
continue;
}
if (method_name == "bilinear") {
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float dtop = (1 - y_lerp) * grads(b, y, x, d);
grads_image(b_in, top_y_index, left_x_index, d) +=
static_cast<T>((1 - x_lerp) * dtop);
grads_image(b_in, top_y_index, right_x_index, d) +=
static_cast<T>(x_lerp * dtop);
const float dbottom = y_lerp * grads(b, y, x, d);
grads_image(b_in, bottom_y_index, left_x_index, d) +=
static_cast<T>((1 - x_lerp) * dbottom);
grads_image(b_in, bottom_y_index, right_x_index, d) +=
static_cast<T>(x_lerp * dbottom);
}
} else {
for (int d = 0; d < depth; ++d) {
int closest_x_index = roundf(in_x);
int closest_y_index = roundf(in_y);
grads_image(b_in, closest_y_index, closest_x_index, d) +=
static_cast<T>(grads(b, y, x, d));
}
}
}
}
}
};
const double cost_per_pixel =
(method_name == "bilinear"
? depth * (Eigen::TensorOpCost::AddCost<float>() * 7 +
Eigen::TensorOpCost::MulCost<float>() * 6 +
Eigen::TensorOpCost::CastCost<T, float>() * 4) +
Eigen::TensorOpCost::AddCost<float>() * 4
: depth * (Eigen::TensorOpCost::AddCost<float>() +
Eigen::TensorOpCost::CastCost<T, float>()) +
Eigen::TensorOpCost::AddCost<float>() * 3);
const double cost_per_box = crop_height * crop_width * cost_per_pixel;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
int max_threads = OpDeterminismRequired() ? 1 : worker_threads.num_threads;
Shard(max_threads, worker_threads.workers, num_boxes, cost_per_box,
CropAndResizeBackImgPerBox);
return true;
}
};
}  // namespace functor
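// Computes the gradient of CropAndResize with respect to the normalized box
// coordinates [y1, x1, y2, x2]; only the bilinear method is supported.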
template <typename Device, typename T>
class CropAndResizeGradBoxesOp : public AsyncOpKernel {
public:
explicit CropAndResizeGradBoxesOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
string method;
OP_REQUIRES_OK(context, context->GetAttr("method", &method));
OP_REQUIRES(context, method == "bilinear",
errors::InvalidArgument("method must be 'bilinear'", method));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(2);
const Tensor& box_index = context->input(3);
const Tensor& image = context->input(1);
OP_REQUIRES_ASYNC(context, grads.dims() == 4,
errors::InvalidArgument("grads image must be 4-D",
grads.shape().DebugString()),
done);
const int crop_height = grads.dim_size(1);
const int crop_width = grads.dim_size(2);
const int depth = grads.dim_size(3);
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("grads dimensions must be positive"), done);
OP_REQUIRES_ASYNC(context, image.dims() == 4,
errors::InvalidArgument("input image must be 4-D",
image.shape().DebugString()),
done);
const int batch_size = image.dim_size(0);
const int image_height = image.dim_size(1);
const int image_width = image.dim_size(2);
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(context, image.dim_size(3) == depth,
errors::InvalidArgument("image, grads depth differ"),
done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(0) == num_boxes,
errors::InvalidArgument("boxes and grads have incompatible shape"),
done);
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_ASYNC(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"Deterministic GPU implementation of CropAndResizeBackpropBoxes"
" not available."),
done);
}
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_output(0, TensorShape({num_boxes, 4}), &output),
done);
auto compute_callback = [context, output]() {
const Tensor& grads = context->input(0);
const Tensor& image = context->input(1);
const Tensor& boxes = context->input(2);
const Tensor& box_index = context->input(3);
const bool status = functor::CropAndResizeBackpropBoxes<Device, T>()(
context->eigen_device<Device>(), grads.tensor<float, 4>(),
image.tensor<T, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), output->tensor<float, 2>());
if (!status) {
context->SetStatus(errors::Internal(
"Failed to launch CropAndResizeBackpropBoxes kernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
};
namespace functor {
template <typename T>
struct CropAndResizeBackpropBoxes<CPUDevice, T> {
bool operator()(const CPUDevice& d,
typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
typename TTypes<float, 2>::Tensor grads_boxes) {
const int batch_size = image.dimension(0);
const int image_height = image.dimension(1);
const int image_width = image.dimension(2);
const int num_boxes = grads.dimension(0);
const int crop_height = grads.dimension(1);
const int crop_width = grads.dimension(2);
const int depth = grads.dimension(3);
grads_boxes.setZero();
for (int b = 0; b < num_boxes; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_ratio =
(crop_height > 1)
? static_cast<float>(image_height - 1) / (crop_height - 1)
: 0;
const float width_ratio =
(crop_width > 1)
? static_cast<float>(image_width - 1) / (crop_width - 1)
: 0;
const float height_scale =
(crop_height > 1) ? (y2 - y1) * height_ratio : 0;
const float width_scale = (crop_width > 1) ? (x2 - x1) * width_ratio : 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float top_left(
static_cast<float>(image(b_in, top_y_index, left_x_index, d)));
const float top_right(
static_cast<float>(image(b_in, top_y_index, right_x_index, d)));
const float bottom_left(static_cast<float>(
image(b_in, bottom_y_index, left_x_index, d)));
const float bottom_right(static_cast<float>(
image(b_in, bottom_y_index, right_x_index, d)));
float image_grad_y = (1 - x_lerp) * (bottom_left - top_left) +
x_lerp * (bottom_right - top_right);
float image_grad_x = (1 - y_lerp) * (top_right - top_left) +
y_lerp * (bottom_right - bottom_left);
const float top_grad = grads(b, y, x, d);
image_grad_y *= top_grad;
image_grad_x *= top_grad;
if (crop_height > 1) {
grads_boxes(b, 0) +=
image_grad_y * (image_height - 1 - y * height_ratio);
grads_boxes(b, 2) += image_grad_y * (y * height_ratio);
} else {
grads_boxes(b, 0) += image_grad_y * 0.5 * (image_height - 1);
grads_boxes(b, 2) += image_grad_y * 0.5 * (image_height - 1);
}
if (crop_width > 1) {
grads_boxes(b, 1) +=
image_grad_x * (image_width - 1 - x * width_ratio);
grads_boxes(b, 3) += image_grad_x * (x * width_ratio);
} else {
grads_boxes(b, 1) += image_grad_x * 0.5 * (image_width - 1);
grads_boxes(b, 3) += image_grad_x * 0.5 * (image_width - 1);
}
}
}
}
}
return true;
}
};
}  // namespace functor
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("CropAndResize") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("crop_size"), \
CropAndResizeOp<CPUDevice, T>); \
\
REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradBoxes") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
CropAndResizeGradBoxesOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradImage") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("image_size"), \
CropAndResizeGradImageOp<CPUDevice, T>);
TF_CALL_half(REGISTER_KERNEL);
TF_CALL_float(REGISTER_KERNEL);
TF_CALL_double(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
template <>
void CheckValidBoxIndexHelper<GPUDevice>::operator()(
const GPUDevice& d, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, typename TTypes<bool, 0>::Tensor isvalid);
extern template struct CheckValidBoxIndexHelper<GPUDevice>;
}  // namespace functor
namespace {
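// GPU overload: a device-side reduction writes a single validity bool, which
// is copied into pinned host memory; the event manager then runs a callback
// that raises OutOfRange on failure or invokes compute() followed by done().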
template <>
inline void RunIfBoxIndexIsValid<GPUDevice>(
OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, const Callback& compute, const Callback& done) {
const int num_boxes = box_index.dimension(0);
if (num_boxes == 0) {
compute();
done();
return;
}
Tensor isvalid_dev_tensor;
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<bool>::value, TensorShape({}),
&isvalid_dev_tensor),
done);
typename TTypes<bool, 0>::Tensor isvalid_dev =
isvalid_dev_tensor.tensor<bool, 0>();
functor::CheckValidBoxIndexHelper<GPUDevice>()(
context->eigen_device<GPUDevice>(), box_index, batch_size, isvalid_dev);
auto* stream = context->op_device_context()->stream();
OP_REQUIRES_ASYNC(context, stream,
errors::Internal("No GPU stream available."), done);
Tensor isvalid_host_tensor;
AllocatorAttributes alloc_attr;
alloc_attr.set_on_host(true);
alloc_attr.set_gpu_compatible(true);
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<bool>::value, TensorShape({}),
&isvalid_host_tensor, alloc_attr),
done);
se::DeviceMemoryBase wrapped(isvalid_dev.data(), sizeof(bool));
const bool status =
stream
->Memcpy(isvalid_host_tensor.scalar<bool>().data() ,
wrapped , sizeof(bool))
.ok();
OP_REQUIRES_ASYNC(
context, status,
errors::Internal("Failed to launch copy of isvalid from device to host."),
done);
TensorReference isvalid_dev_ref(isvalid_dev_tensor);
auto wrapped_callback = [context, isvalid_host_tensor, isvalid_dev_ref,
compute, done]() {
{
auto stream = context->op_device_context()->stream();
ScopedActivateContext scoped_activation{stream->parent()};
const bool isvalid = isvalid_host_tensor.scalar<bool>()();
isvalid_dev_ref.Unref();
OP_REQUIRES_ASYNC(
context, isvalid,
errors::OutOfRange("box_index has values outside [0, batch_size)"),
done);
compute();
}
done();
};
context->device()
->tensorflow_accelerator_device_info()
->event_mgr->ThenExecute(stream, wrapped_callback);
}
}  // namespace
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("CropAndResize") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("crop_size"), \
CropAndResizeOp<GPUDevice, T>); \
\
REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradImage") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("image_size"), \
CropAndResizeGradImageOp<GPUDevice, T>); \
\
REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradBoxes") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T"), \
CropAndResizeGradBoxesOp<GPUDevice, T>);
TF_CALL_half(REGISTER_KERNEL);
TF_CALL_float(REGISTER_KERNEL);
TF_CALL_double(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#endif
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class CropAndResizeOpTest : public OpsTestBase {
protected:
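// Builds a CropAndResize node with the given extrapolation value and sampling
// method ("bilinear" or "nearest").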
template <typename T>
void MakeOp(float extrapolation_value, const string& method) {
TF_EXPECT_OK(NodeDefBuilder("crop_and_resize_op", "CropAndResize")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("extrapolation_value", extrapolation_value)
.Attr("method", method)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
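// Instantiates a pair of 2x2 -> 1x1 crop tests (bilinear and nearest) for each
// supported input type.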
#define REGISTER_TEST(T) \
TEST_F(CropAndResizeOpTest, TestCropAndResize##T) { \
MakeOp<T>(0, "bilinear"); \
AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \
AddInputFromArray<int32>(TensorShape({1}), {0}); \
AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \
test::FillValues<float>(&expected, {2.5}); \
test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \
} \
\
TEST_F(CropAndResizeOpTest, TestCropAndResize##T##nearest) { \
MakeOp<T>(0, "nearest"); \
AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \
AddInputFromArray<int32>(TensorShape({1}), {0}); \
AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \
test::FillValues<float>(&expected, {4.0}); \
test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8) {
MakeOp<uint8>(0, "bilinear");
AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {2.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8NearestNeibor) {
MakeOp<uint8>(0, "nearest");
AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {2.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2,
2, 2.5, 3,
3, 3.5, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{4, 3.5, 3,
3, 2.5, 2,
2, 1.5, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{4, 4, 3,
4, 4, 3,
2, 2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9,
1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2NearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9,
1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{9, 7,
3, 1,
5, 4,
2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{9, 7,
3, 1,
5, 4,
2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Extrapolated) {
const float v = -1;
MakeOp<float>(v, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {-1, -1, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{v, v, v,
v, 1, 2,
v, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NoCrop) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<int32>(TensorShape({0}), {});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({0, 3, 3, 1}));
test::FillValues<float>(&expected, {});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestInvalidInputShape) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "input image must be 4-D")) << s;
}
TEST_F(CropAndResizeOpTest, TestInvalidBoxIndexShape) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "box_index has incompatible shape"))
<< s;
}
TEST_F(CropAndResizeOpTest, TestInvalidBoxIndex) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {1});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(),
"box_index has values outside [0, batch_size)"))
<< s;
}
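// Uses a 999x999 input so the crop work is large enough to exercise the
// sharded (multi-threaded) code path; the expected tensor is assembled by
// concatenating the two half-image crops.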
TEST_F(CropAndResizeOpTest, TestWithSharding) {
MakeOp<float>(0, "bilinear");
const int kLength = 999;
const int kHalf = (kLength + 1) / 2;
AddInput<float>(TensorShape({1, kLength, kLength, 1}),
[=](int i) -> float { return i % kLength; });
AddInputFromArray<float>(TensorShape({2, 4}),
{0, 0, 0.5, 0.5, 0.5, 0.5, 1, 1});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {kHalf, kHalf});
TF_ASSERT_OK(RunOpKernel());
Tensor result1(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
test::FillFn<float>(&result1, [=](int i) -> float { return i % kHalf; });
Tensor result2(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
test::FillFn<float>(&result2,
[=](int i) -> float { return i % kHalf + kHalf - 1; });
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, kHalf, kHalf, 1}));
TF_ASSERT_OK(tensor::Concat({result1, result2}, &expected));
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/crop_and_resize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/crop_and_resize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c51d0205-c7bd-4898-90c7-d1f12f00a9d1 | cpp | tensorflow/tensorflow | non_max_suppression_op | tensorflow/core/kernels/image/non_max_suppression_op.cc | tensorflow/core/kernels/image/non_max_suppression_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/non_max_suppression_op.h"
#include <cmath>
#include <functional>
#include <limits>
#include <queue>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
static inline void CheckScoreSizes(OpKernelContext* context, int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 1,
errors::InvalidArgument(
"scores must be 1-D", scores.shape().DebugString(),
" (Shape must be rank 1 but is rank ", scores.dims(), ")"));
OP_REQUIRES(
context, scores.dim_size(0) == num_boxes,
errors::InvalidArgument("scores has incompatible shape (Dimensions must "
"be equal, but are ",
num_boxes, " and ", scores.dim_size(0), ")"));
}
static inline void ParseAndCheckOverlapSizes(OpKernelContext* context,
const Tensor& overlaps,
int* num_boxes) {
OP_REQUIRES(context, overlaps.dims() == 2,
errors::InvalidArgument("overlaps must be 2-D",
overlaps.shape().DebugString()));
*num_boxes = overlaps.dim_size(0);
OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes,
errors::InvalidArgument("overlaps must be square",
overlaps.shape().DebugString()));
}
static inline void ParseAndCheckBoxSizes(OpKernelContext* context,
const Tensor& boxes, int* num_boxes) {
OP_REQUIRES(context, boxes.dims() == 2,
errors::InvalidArgument(
"boxes must be 2-D", boxes.shape().DebugString(),
" (Shape must be rank 2 but is rank ", boxes.dims(), ")"));
*num_boxes = boxes.dim_size(0);
OP_REQUIRES(context, boxes.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns (Dimension "
"must be 4 but is ",
boxes.dim_size(1), ")"));
}
static inline void CheckCombinedNMSScoreSizes(OpKernelContext* context,
int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 3,
errors::InvalidArgument("scores must be 3-D",
scores.shape().DebugString()));
OP_REQUIRES(context, scores.dim_size(1) == num_boxes,
errors::InvalidArgument("scores has incompatible shape"));
}
static inline void ParseAndCheckCombinedNMSBoxSizes(OpKernelContext* context,
const Tensor& boxes,
int* num_boxes,
const int num_classes) {
OP_REQUIRES(context, boxes.dims() == 4,
errors::InvalidArgument("boxes must be 4-D",
boxes.shape().DebugString()));
bool box_check = boxes.dim_size(2) == 1 || boxes.dim_size(2) == num_classes;
OP_REQUIRES(context, box_check,
errors::InvalidArgument(
"third dimension of boxes must be either 1 or num classes"));
*num_boxes = boxes.dim_size(1);
OP_REQUIRES(context, boxes.dim_size(3) == 4,
errors::InvalidArgument("boxes must have 4 columns"));
}
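// Computes intersection-over-union of boxes i and j. Coordinates may appear in
// either order (min/max normalize them); degenerate boxes yield an IOU of 0.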
template <typename T>
static inline float IOU(typename TTypes<T, 2>::ConstTensor boxes, int i,
int j) {
const float ymin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float ymax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
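// Same IOU computation on a flat float array, where i and j are offsets of the
// first coordinate of each box (used by the batched combined-NMS path below).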
static inline float IOU(const float* boxes, int i, int j) {
const float ymin_i = Eigen::numext::mini<float>(boxes[i], boxes[i + 2]);
const float xmin_i = Eigen::numext::mini<float>(boxes[i + 1], boxes[i + 3]);
const float ymax_i = Eigen::numext::maxi<float>(boxes[i], boxes[i + 2]);
const float xmax_i = Eigen::numext::maxi<float>(boxes[i + 1], boxes[i + 3]);
const float ymin_j = Eigen::numext::mini<float>(boxes[j], boxes[j + 2]);
const float xmin_j = Eigen::numext::mini<float>(boxes[j + 1], boxes[j + 3]);
const float ymax_j = Eigen::numext::maxi<float>(boxes[j], boxes[j + 2]);
const float xmax_j = Eigen::numext::maxi<float>(boxes[j + 1], boxes[j + 3]);
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
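// Returns the precomputed pairwise overlap between boxes i and j.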
template <typename T>
static inline T Overlap(typename TTypes<T, 2>::ConstTensor overlaps, int i,
int j) {
return overlaps(i, j);
}
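// Factories that bind the boxes/overlaps tensor into a pairwise similarity
// callback consumed by DoNonMaxSuppressionOp.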
template <typename T>
static inline std::function<float(int, int)> CreateIOUSimilarityFn(
const Tensor& boxes) {
typename TTypes<T, 2>::ConstTensor boxes_data = boxes.tensor<T, 2>();
return std::bind(&IOU<T>, boxes_data, std::placeholders::_1,
std::placeholders::_2);
}
template <typename T>
static inline std::function<T(int, int)> CreateOverlapSimilarityFn(
const Tensor& overlaps) {
typename TTypes<T, 2>::ConstTensor overlaps_data =
overlaps.tensor<float, 2>();
return std::bind(&Overlap<T>, overlaps_data, std::placeholders::_1,
std::placeholders::_2);
}
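// Greedy non-max suppression over all boxes scoring above score_threshold.
// Candidates are visited in descending score order. With soft-NMS
// (soft_nms_sigma > 0) each candidate's score is attenuated by a Gaussian
// penalty of its similarity to already-selected boxes and re-scored candidates
// are pushed back into the queue; otherwise a candidate whose similarity
// exceeds similarity_threshold is suppressed outright. Optionally pads the
// outputs to max_output_size and reports the number of valid entries.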
template <typename T>
void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores,
int num_boxes, const Tensor& max_output_size,
const T similarity_threshold,
const T score_threshold, const T soft_nms_sigma,
const std::function<float(int, int)>& similarity_fn,
bool return_scores_tensor = false,
bool pad_to_max_output_size = false,
int* ptr_num_valid_outputs = nullptr) {
const int output_size = max_output_size.scalar<int>()();
OP_REQUIRES(context, output_size >= 0,
errors::InvalidArgument("output size must be non-negative"));
std::vector<T> scores_data(num_boxes);
std::copy_n(scores.flat<T>().data(), num_boxes, scores_data.begin());
struct Candidate {
int box_index;
T score;
int suppress_begin_index;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return ((bs_i.score == bs_j.score) && (bs_i.box_index > bs_j.box_index)) ||
bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
for (int i = 0; i < scores_data.size(); ++i) {
if (scores_data[i] > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, scores_data[i], 0}));
}
}
T scale = static_cast<T>(0.0);
bool is_soft_nms = soft_nms_sigma > static_cast<T>(0.0);
if (is_soft_nms) {
scale = static_cast<T>(-0.5) / soft_nms_sigma;
}
auto suppress_weight = [similarity_threshold, scale,
is_soft_nms](const T sim) {
const T weight = Eigen::numext::exp<T>(scale * sim * sim);
return is_soft_nms || sim <= similarity_threshold ? weight
: static_cast<T>(0.0);
};
std::vector<int> selected;
std::vector<T> selected_scores;
float similarity;
T original_score;
Candidate next_candidate;
while (selected.size() < output_size && !candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
original_score = next_candidate.score;
candidate_priority_queue.pop();
bool should_hard_suppress = false;
for (int j = static_cast<int>(selected.size()) - 1;
j >= next_candidate.suppress_begin_index; --j) {
similarity = similarity_fn(next_candidate.box_index, selected[j]);
next_candidate.score *= suppress_weight(static_cast<T>(similarity));
if (!is_soft_nms && static_cast<T>(similarity) > similarity_threshold) {
should_hard_suppress = true;
break;
}
if (next_candidate.score <= score_threshold) break;
}
next_candidate.suppress_begin_index = selected.size();
if (!should_hard_suppress) {
if (next_candidate.score == original_score) {
selected.push_back(next_candidate.box_index);
selected_scores.push_back(next_candidate.score);
continue;
}
if (next_candidate.score > score_threshold) {
candidate_priority_queue.push(next_candidate);
}
}
}
int num_valid_outputs = selected.size();
if (pad_to_max_output_size) {
selected.resize(output_size, 0);
selected_scores.resize(output_size, static_cast<T>(0));
}
if (ptr_num_valid_outputs) {
*ptr_num_valid_outputs = num_valid_outputs;
}
Tensor* output_indices = nullptr;
TensorShape output_shape({static_cast<int>(selected.size())});
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape, &output_indices));
TTypes<int, 1>::Tensor output_indices_data = output_indices->tensor<int, 1>();
std::copy_n(selected.begin(), selected.size(), output_indices_data.data());
if (return_scores_tensor) {
Tensor* output_scores = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(1, output_shape, &output_scores));
typename TTypes<T, 1>::Tensor output_scores_data =
output_scores->tensor<T, 1>();
std::copy_n(selected_scores.begin(), selected_scores.size(),
output_scores_data.data());
}
}
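// A selected box produced by the combined (batched, multi-class) NMS path.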
struct ResultCandidate {
int box_index;
float score;
int class_idx;
float box_coord[4];
};
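// Runs hard NMS for one class of one batch element, writing up to
// size_per_class surviving boxes into result_candidate_vec.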
void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data,
const float* scores_data, int num_boxes, int q,
int num_classes, const int size_per_class,
const float score_threshold, const float iou_threshold,
std::vector<ResultCandidate>& result_candidate_vec) {
struct Candidate {
int box_index;
float score;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::vector<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
float temp_score;
for (int i = 0; i < num_boxes; ++i) {
temp_score = scores_data[i * num_classes + class_idx];
if (temp_score > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, temp_score}));
}
}
std::vector<int> selected;
Candidate next_candidate;
int candidate_box_data_idx, selected_box_data_idx, class_box_idx;
class_box_idx = (q > 1) ? class_idx : 0;
float iou;
while (selected.size() < size_per_class &&
!candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
candidate_priority_queue.pop();
candidate_box_data_idx = (next_candidate.box_index * q + class_box_idx) * 4;
bool should_select = true;
for (int j = selected.size() - 1; j >= 0; --j) {
selected_box_data_idx = (selected[j] * q + class_box_idx) * 4;
iou = IOU(boxes_data, candidate_box_data_idx, selected_box_data_idx);
if (iou > iou_threshold) {
should_select = false;
break;
}
}
if (should_select) {
result_candidate_vec[selected.size() + size_per_class * class_idx] = {
next_candidate.box_index,
next_candidate.score,
class_idx,
{boxes_data[candidate_box_data_idx],
boxes_data[candidate_box_data_idx + 1],
boxes_data[candidate_box_data_idx + 2],
boxes_data[candidate_box_data_idx + 3]}};
selected.push_back(next_candidate.box_index);
}
}
}
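// Merges the per-class NMS results of one batch element: sorts candidates by
// score, keeps the top detections, optionally clips boxes to [0, 1], and pads
// the per-batch outputs to per_batch_size.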
void SelectResultPerBatch(std::vector<float>& nmsed_boxes,
std::vector<float>& nmsed_scores,
std::vector<float>& nmsed_classes,
std::vector<ResultCandidate>& result_candidate_vec,
std::vector<int>& final_valid_detections,
const int batch_idx, int total_size_per_batch,
bool pad_per_class, int max_size_per_batch,
bool clip_boxes, int per_batch_size) {
auto rc_cmp = [](const ResultCandidate rc_i, const ResultCandidate rc_j) {
return rc_i.score > rc_j.score;
};
std::sort(result_candidate_vec.begin(), result_candidate_vec.end(), rc_cmp);
int max_detections = 0;
int result_candidate_size =
std::count_if(result_candidate_vec.begin(), result_candidate_vec.end(),
[](ResultCandidate rc) { return rc.box_index > -1; });
if (!pad_per_class) {
max_detections = std::min(result_candidate_size, total_size_per_batch);
} else {
max_detections = std::min(per_batch_size, result_candidate_size);
}
final_valid_detections[batch_idx] = max_detections;
int curr_total_size = max_detections;
int result_idx = 0;
while (curr_total_size > 0 && result_idx < result_candidate_vec.size()) {
ResultCandidate next_candidate = result_candidate_vec[result_idx++];
if (clip_boxes) {
const float box_min = 0.0;
const float box_max = 1.0;
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[0], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[1], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[2], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[3], box_max), box_min));
} else {
nmsed_boxes.push_back(next_candidate.box_coord[0]);
nmsed_boxes.push_back(next_candidate.box_coord[1]);
nmsed_boxes.push_back(next_candidate.box_coord[2]);
nmsed_boxes.push_back(next_candidate.box_coord[3]);
}
nmsed_scores.push_back(next_candidate.score);
nmsed_classes.push_back(next_candidate.class_idx);
curr_total_size--;
}
nmsed_boxes.resize(per_batch_size * 4, 0);
nmsed_scores.resize(per_batch_size, 0);
nmsed_classes.resize(per_batch_size, 0);
}
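// CPU implementation of CombinedNonMaxSuppression: per-class NMS is sharded
// across (batch, class) pairs, results are merged per batch, and the four
// outputs (boxes, scores, classes, valid_detections) are filled in parallel.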
void BatchedNonMaxSuppressionOp(
OpKernelContext* context, const Tensor& inp_boxes, const Tensor& inp_scores,
int num_boxes, const int max_size_per_class, const int total_size_per_batch,
const float score_threshold, const float iou_threshold,
bool pad_per_class = false, bool clip_boxes = true) {
const int num_batches = inp_boxes.dim_size(0);
int num_classes = inp_scores.dim_size(2);
int q = inp_boxes.dim_size(2);
const float* scores_data =
const_cast<float*>(inp_scores.flat<float>().data());
const float* boxes_data = const_cast<float*>(inp_boxes.flat<float>().data());
int boxes_per_batch = num_boxes * q * 4;
int scores_per_batch = num_boxes * num_classes;
const int size_per_class = std::min(max_size_per_class, num_boxes);
std::vector<std::vector<ResultCandidate>> result_candidate_vec(
num_batches,
std::vector<ResultCandidate>(size_per_class * num_classes,
{-1, -1.0, -1, {0.0, 0.0, 0.0, 0.0}}));
std::vector<std::vector<float>> nmsed_boxes(num_batches);
std::vector<std::vector<float>> nmsed_scores(num_batches);
std::vector<std::vector<float>> nmsed_classes(num_batches);
std::vector<int> final_valid_detections(num_batches);
auto shard_nms = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / num_classes;
int class_idx = idx % num_classes;
DoNMSPerClass(batch_idx, class_idx,
boxes_data + boxes_per_batch * batch_idx,
scores_data + scores_per_batch * batch_idx, num_boxes, q,
num_classes, size_per_class, score_threshold, iou_threshold,
result_candidate_vec[batch_idx]);
}
};
int length = num_batches * num_classes;
int input_bytes = num_boxes * 10 * sizeof(float);
int output_bytes = num_boxes * 10 * sizeof(float);
int compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 14 +
Eigen::TensorOpCost::MulCost<int>() * num_boxes * 9 +
Eigen::TensorOpCost::MulCost<float>() * num_boxes * 9 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 8;
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
const CPUDevice& d = context->eigen_device<CPUDevice>();
d.parallelFor(length, cost, shard_nms);
int per_batch_size = total_size_per_batch;
int max_total_size = static_cast<int>(
std::min(static_cast<int64_t>(std::numeric_limits<int>::max()),
static_cast<int64_t>(max_size_per_class) * num_classes));
if (pad_per_class) {
per_batch_size = std::min(total_size_per_batch, max_total_size);
}
Tensor* valid_detections_t = nullptr;
TensorShape valid_detections_shape({num_batches});
OP_REQUIRES_OK(context, context->allocate_output(3, valid_detections_shape,
&valid_detections_t));
auto valid_detections_flat = valid_detections_t->template flat<int>();
auto shard_result = [&](int begin, int end) {
for (int batch_idx = begin; batch_idx < end; ++batch_idx) {
SelectResultPerBatch(
nmsed_boxes[batch_idx], nmsed_scores[batch_idx],
nmsed_classes[batch_idx], result_candidate_vec[batch_idx],
final_valid_detections, batch_idx, total_size_per_batch,
pad_per_class, max_total_size, clip_boxes, per_batch_size);
valid_detections_flat(batch_idx) = final_valid_detections[batch_idx];
}
};
length = num_batches;
input_bytes =
num_boxes * 10 * sizeof(float) + per_batch_size * 6 * sizeof(float);
output_bytes =
num_boxes * 5 * sizeof(float) + per_batch_size * 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 5 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 5;
const Eigen::TensorOpCost cost_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_result, shard_result);
Tensor* nmsed_boxes_t = nullptr;
TensorShape boxes_shape({num_batches, per_batch_size, 4});
OP_REQUIRES_OK(context,
context->allocate_output(0, boxes_shape, &nmsed_boxes_t));
auto nmsed_boxes_flat = nmsed_boxes_t->template flat<float>();
Tensor* nmsed_scores_t = nullptr;
TensorShape scores_shape({num_batches, per_batch_size});
OP_REQUIRES_OK(context,
context->allocate_output(1, scores_shape, &nmsed_scores_t));
auto nmsed_scores_flat = nmsed_scores_t->template flat<float>();
Tensor* nmsed_classes_t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(2, scores_shape, &nmsed_classes_t));
auto nmsed_classes_flat = nmsed_classes_t->template flat<float>();
auto shard_copy_result = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / per_batch_size;
int j = idx % per_batch_size;
nmsed_scores_flat(idx) = nmsed_scores[batch_idx][j];
nmsed_classes_flat(idx) = nmsed_classes[batch_idx][j];
for (int k = 0; k < 4; ++k) {
nmsed_boxes_flat(idx * 4 + k) = nmsed_boxes[batch_idx][j * 4 + k];
}
}
};
length = num_batches * per_batch_size;
input_bytes = 6 * sizeof(float);
output_bytes = 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * 2 +
Eigen::TensorOpCost::MulCost<int>() * 2 +
Eigen::TensorOpCost::DivCost<float>() * 2;
const Eigen::TensorOpCost cost_copy_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_copy_result, shard_copy_result);
}
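// Reads a scalar threshold tensor of any supported floating type as T.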
template <typename T>
T GetScalar(const Tensor& tensor) {
switch (tensor.dtype()) {
case DT_FLOAT:
return static_cast<T>(tensor.scalar<float>()());
case DT_DOUBLE:
return static_cast<T>(tensor.scalar<double>()());
case DT_BFLOAT16:
return static_cast<T>(tensor.scalar<Eigen::bfloat16>()());
case DT_HALF:
return static_cast<T>(tensor.scalar<Eigen::half>()());
default:
DCHECK(false) << "Unsupported type " << tensor.dtype();
break;
}
return static_cast<T>(0);
}
}
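// Original NonMaxSuppression kernel: iou_threshold is read from a node
// attribute and no score threshold is applied.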
template <typename Device>
class NonMaxSuppressionOp : public OpKernel {
public:
explicit NonMaxSuppressionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("iou_threshold", &iou_threshold_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<float>(boxes);
const float score_threshold_val = std::numeric_limits<float>::lowest();
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
iou_threshold_, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
private:
float iou_threshold_;
};
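// NonMaxSuppressionV2: iou_threshold is supplied as a scalar input tensor.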
template <typename Device, typename T>
class NonMaxSuppressionV2Op : public OpKernel {
public:
explicit NonMaxSuppressionV2Op(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
const T score_threshold_val = std::numeric_limits<T>::lowest();
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size,
iou_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
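// NonMaxSuppressionV3: like V2, but also takes a scalar score_threshold input.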
template <typename Device, typename T>
class NonMaxSuppressionV3Op : public OpKernel {
public:
explicit NonMaxSuppressionV3Op(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString(),
" (Shape must be rank 0 but is ", "rank ",
max_output_size.dims(), ")"));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString(),
" (Shape must be rank 0 but is rank ",
iou_threshold.dims(), ")"));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = GetScalar<T>(score_threshold);
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size,
iou_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
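// NonMaxSuppressionV4: adds the pad_to_max_output_size attribute and emits the
// number of valid selected indices as a second output.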
template <typename Device, typename T>
class NonMaxSuppressionV4Op : public OpKernel {
public:
explicit NonMaxSuppressionV4Op(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = GetScalar<T>(score_threshold);
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
int num_valid_outputs;
bool return_scores_tensor_ = false;
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(
context, scores, num_boxes, max_output_size, iou_threshold_val,
score_threshold_val, dummy_soft_nms_sigma, similarity_fn,
return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);
if (!context->status().ok()) {
return;
}
Tensor* num_outputs_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
1, tensorflow::TensorShape{}, &num_outputs_t));
num_outputs_t->scalar<int32>().setConstant(num_valid_outputs);
}
private:
bool pad_to_max_output_size_;
};
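// NonMaxSuppressionV5: additionally accepts a soft_nms_sigma input for
// Soft-NMS and returns the (possibly rescaled) scores of the selected boxes.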
template <typename Device, typename T>
class NonMaxSuppressionV5Op : public OpKernel {
public:
explicit NonMaxSuppressionV5Op(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = iou_threshold.scalar<T>()();
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = score_threshold.scalar<T>()();
const Tensor& soft_nms_sigma = context->input(5);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(soft_nms_sigma.shape()),
errors::InvalidArgument("soft_nms_sigma must be 0-D, got shape ",
soft_nms_sigma.shape().DebugString()));
const T soft_nms_sigma_val = soft_nms_sigma.scalar<T>()();
OP_REQUIRES(context, soft_nms_sigma_val >= static_cast<T>(0.0),
errors::InvalidArgument("soft_nms_sigma_val must be >= 0"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
int num_valid_outputs;
const bool return_scores_tensor_ = true;
DoNonMaxSuppressionOp<T>(
context, scores, num_boxes, max_output_size, iou_threshold_val,
score_threshold_val, soft_nms_sigma_val, similarity_fn,
return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);
if (!context->status().ok()) {
return;
}
Tensor* num_outputs_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
2, tensorflow::TensorShape{}, &num_outputs_t));
num_outputs_t->scalar<int32>().setConstant(num_valid_outputs);
}
private:
bool pad_to_max_output_size_;
};
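// Non-max suppression driven by a caller-provided pairwise overlaps matrix
// instead of IOU computed from box coordinates.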
template <typename Device>
class NonMaxSuppressionWithOverlapsOp : public OpKernel {
public:
explicit NonMaxSuppressionWithOverlapsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& overlaps = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& overlap_threshold = context->input(3);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(overlap_threshold.shape()),
errors::InvalidArgument("overlap_threshold must be 0-D, got shape ",
overlap_threshold.shape().DebugString()));
const float overlap_threshold_val = overlap_threshold.scalar<float>()();
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
int num_boxes = 0;
ParseAndCheckOverlapSizes(context, overlaps, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateOverlapSimilarityFn<float>(overlaps);
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
overlap_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
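// Batched, multi-class NMS producing padded boxes/scores/classes outputs plus
// a per-batch count of valid detections.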
template <typename Device>
class CombinedNonMaxSuppressionOp : public OpKernel {
public:
explicit CombinedNonMaxSuppressionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_per_class", &pad_per_class_));
OP_REQUIRES_OK(context, context->GetAttr("clip_boxes", &clip_boxes_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
OP_REQUIRES(
context, (boxes.dim_size(0) == scores.dim_size(0)),
errors::InvalidArgument("boxes and scores must have same batch size"));
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_size_per_class must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const int max_size_per_class = max_output_size.scalar<int>()();
OP_REQUIRES(context, max_size_per_class > 0,
errors::InvalidArgument("max_size_per_class must be positive"));
const Tensor& max_total_size = context->input(3);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_total_size.shape()),
errors::InvalidArgument("max_total_size must be 0-D, got shape ",
max_total_size.shape().DebugString()));
const int max_total_size_per_batch = max_total_size.scalar<int>()();
OP_REQUIRES(context, max_total_size_per_batch > 0,
errors::InvalidArgument("max_total_size must be > 0"));
if (max_total_size_per_batch > pow(10, 6)) {
LOG(WARNING) << "Detected a large value for `max_total_size`. This may "
<< "cause OOM error. (max_total_size: "
<< max_total_size.scalar<int>()() << ")";
}
const Tensor& iou_threshold = context->input(4);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const float iou_threshold_val = iou_threshold.scalar<float>()();
const Tensor& score_threshold = context->input(5);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
const int num_classes = scores.dim_size(2);
ParseAndCheckCombinedNMSBoxSizes(context, boxes, &num_boxes, num_classes);
CheckCombinedNMSScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
BatchedNonMaxSuppressionOp(context, boxes, scores, num_boxes,
max_size_per_class, max_total_size_per_batch,
score_threshold_val, iou_threshold_val,
pad_per_class_, clip_boxes_);
}
private:
bool pad_per_class_;
bool clip_boxes_;
};
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppression").Device(DEVICE_CPU),
NonMaxSuppressionOp<CPUDevice>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV2").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV2Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV2")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV2Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV3").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV3Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV3")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV3Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV4").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV4Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV4")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV4Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV5").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV5Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV5")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV5Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionWithOverlaps").Device(DEVICE_CPU),
NonMaxSuppressionWithOverlapsOp<CPUDevice>);
REGISTER_KERNEL_BUILDER(Name("CombinedNonMaxSuppression").Device(DEVICE_CPU),
CombinedNonMaxSuppressionOp<CPUDevice>);
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class NonMaxSuppressionOpTest : public OpsTestBase {
protected:
void MakeOp(float iou_threshold) {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppression")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("iou_threshold", iou_threshold)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectWithNegativeScores) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(
TensorShape({6}), {.9f - 10.0f, .75f - 10.0f, .6f - 10.0f, .95f - 10.0f,
.5f - 10.0f, .3f - 10.0f});
AddInputFromArray<int>(TensorShape({}), {6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestFirstBoxDegenerate) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({3, 4}),
{0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3});
AddInputFromArray<float>(TensorShape({3}), {.9f, .75f, .6f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {0, 1, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectSingleBox) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp(.5);
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestInvalidIOUThreshold) {
MakeOp(1.2);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestEmptyInput) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
class NonMaxSuppressionV2OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp();
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectSingleBox) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp();
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestInvalidIOUThreshold) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {1.2f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestEmptyInput) {
MakeOp();
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
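// Input/threshold dtype combinations exercised by the typed V3 tests below.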
using NmsValidTypes =
::testing::Types<std::pair<float, float>, std::pair<float, Eigen::half>,
std::pair<Eigen::half, Eigen::half>,
std::pair<Eigen::half, float> >;
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV3OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV3")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV3OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType, float>(TensorShape({6}),
{.1, 0, 0, .3, .2, -5.0});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {-3.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostTwoBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {2});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectSingleBox) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromTenIdenticalBoxes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
int num_boxes = 10;
std::vector<InputType> corners(num_boxes * 4);
std::vector<InputType> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = static_cast<InputType>(0);
corners[i * 4 + 1] = static_cast<InputType>(0);
corners[i * 4 + 2] = static_cast<InputType>(1);
corners[i * 4 + 3] = static_cast<InputType>(1);
scores[i] = static_cast<InputType>(.9);
}
this->template AddInputFromArray<InputType>(TensorShape({num_boxes, 4}),
corners);
this->template AddInputFromArray<InputType>(TensorShape({num_boxes}), scores);
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInconsistentBoxAndScoreShapes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({5}),
{.9f, .75f, .6f, .95f, .5f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInvalidIOUThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {1.2f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestEmptyInput) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromArray<InputType>(TensorShape({0, 4}), {});
this->template AddInputFromArray<InputType>(TensorShape({0}), {});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
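// NonMaxSuppressionV4 with pad_to_max_output_size=true: the selected indices
// are zero-padded to max_output_size and a second output holds the valid count.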
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV4OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV4")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV4OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFive) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
TYPED_TEST(NonMaxSuppressionV4OpTest,
TestSelectFromThreeClustersPadFiveScoreThr) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(2);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
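// NonMaxSuppressionV5 adds a soft-NMS sigma input and also returns the
// (possibly decayed) scores of the selected boxes plus the valid count.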
class NonMaxSuppressionV5OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV5")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersPadFive) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *GetOutput(0));
const auto expected_scores =
test::AsTensor<float>({.95f, .9f, .3f, 0.0f, 0.0f});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersWithSoftNMS) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {6});
AddInputFromArray<float>(TensorShape({}), {0.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int>(&expected, {3, 0, 1, 5, 4, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected_scores,
{0.95, 0.9, 0.384, 0.3, 0.256, 0.197});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(6);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
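// NonMaxSuppressionWithOverlaps consumes a precomputed [n, n] overlaps matrix
// instead of box coordinates.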
class NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op",
"NonMaxSuppressionWithOverlaps")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
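  // Computes the pairwise IoU matrix for the given boxes (4 corner values per
  // box, in either order) and adds it as the overlaps input tensor.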
void AddIoUInput(const std::vector<float>& boxes) {
ASSERT_EQ((boxes.size() % 4), 0);
size_t num_boxes = boxes.size() / 4;
std::vector<float> iou_overlaps(num_boxes * num_boxes);
auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {
return boxes[box_idx * 4 + corner_idx];
};
for (size_t i = 0; i < num_boxes; ++i) {
for (size_t j = 0; j < num_boxes; ++j) {
const float ymin_i =
std::min<float>(corner_access(i, 0), corner_access(i, 2));
const float xmin_i =
std::min<float>(corner_access(i, 1), corner_access(i, 3));
const float ymax_i =
std::max<float>(corner_access(i, 0), corner_access(i, 2));
const float xmax_i =
std::max<float>(corner_access(i, 1), corner_access(i, 3));
const float ymin_j =
std::min<float>(corner_access(j, 0), corner_access(j, 2));
const float xmin_j =
std::min<float>(corner_access(j, 1), corner_access(j, 3));
const float ymax_j =
std::max<float>(corner_access(j, 0), corner_access(j, 2));
const float xmax_j =
std::max<float>(corner_access(j, 1), corner_access(j, 3));
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
float iou;
if (area_i <= 0 || area_j <= 0) {
iou = 0.0;
} else {
const float intersection_ymin = std::max<float>(ymin_i, ymin_j);
const float intersection_xmin = std::max<float>(xmin_i, xmin_j);
const float intersection_ymax = std::min<float>(ymax_i, ymax_j);
const float intersection_xmax = std::min<float>(xmax_i, xmax_j);
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
iou = intersection_area / (area_i + area_j - intersection_area);
}
iou_overlaps[i * num_boxes + j] = iou;
}
}
AddInputFromArray<float>(TensorShape({static_cast<signed>(num_boxes),
static_cast<signed>(num_boxes)}),
iou_overlaps);
}
};
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp();
AddIoUInput({1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectSingleBox) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp();
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddIoUInput(corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInvalidOverlapsShape) {
MakeOp();
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({2}), {0.5f, 0.5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {0.f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "overlaps must be square")) << s;
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdGreaterOne) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {1.2f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdSmallerZero) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {-0.2f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestEmptyInput) {
MakeOp();
AddIoUInput({});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
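// CombinedNonMaxSuppression runs per-class NMS over a batch and returns padded
// boxes, scores, and classes plus the number of valid detections per image;
// pad_per_class and clip_boxes control padding and box clipping.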
class CombinedNonMaxSuppressionOpTest : public OpsTestBase {
protected:
void MakeOp(bool pad_per_class = false, bool clip_boxes = true) {
TF_EXPECT_OK(NodeDefBuilder("combined_non_max_suppression_op",
"CombinedNonMaxSuppression")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("pad_per_class", pad_per_class)
.Attr("clip_boxes", clip_boxes)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(CombinedNonMaxSuppressionOpTest, TestEmptyInput) {
MakeOp();
AddInputFromArray<float>(TensorShape({0, 0, 0, 4}), {});
AddInputFromArray<float>(TensorShape({0, 0, 0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<int>(TensorShape({}), {10});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({0, 10, 4}));
test::FillValues<float>(&expected_boxes, {});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({0, 10}));
test::FillValues<float>(&expected_scores, {});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({0, 10}));
test::FillValues<float>(&expected_classes, {});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected_valid_d, {});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.3, 1, 0.4});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersNoBoxClipping) {
  MakeOp(/*pad_per_class=*/false, /*clip_boxes=*/false);
AddInputFromArray<float>(TensorShape({1, 6, 1, 4}),
{0, 0, 10, 10, 0, 1, 10, 11, 0, 1, 10, 9,
0, 11, 10, 20, 0, 12, 10, 21, 0, 30, 100, 40});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 11, 10, 20, 0, 0, 10, 10, 0, 30, 100, 40});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.4f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.1f, 0, 0, .3f, .2f, -5.0f});
AddInputFromArray<int>(TensorShape({}), {4});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {-3.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 5, 4}));
test::FillValues<float>(
&expected_boxes,
{
0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 5}));
test::FillValues<float>(&expected_scores, {0.3, 0.1, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 5}));
test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectSingleBox) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 1, 1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1, 1, 1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 1, 4}));
test::FillValues<float>(&expected_boxes, {0, 0, 1, 1});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 1}));
test::FillValues<float>(&expected_scores, {0.9});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 1}));
test::FillValues<float>(&expected_classes, {0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {1});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(
TensorShape({2, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f, .9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.4f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesTwoClasses) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.75, 0.95, 0.9, 0.75});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {3, 3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedTotalSize) {
  MakeOp(/*pad_per_class=*/true);
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {10});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedPerClass) {
  MakeOp(/*pad_per_class=*/true);
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<int>(TensorShape({}), {50});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 4, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0, 0.95, 0.9, 0, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 0, 1, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesTotalSize) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.1f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 5, 4}));
test::FillValues<float>(
&expected_boxes, {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f,
0.1, 0.11f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f,
0.2, 0.22f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 5}));
test::FillValues<float>(
&expected_scores, {0.95, 0.9, 0.75, 0.5, 0.3, 0.95, 0.9, 0.75, 0.5, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 5}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 1, 0, 0, 1, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {5, 5});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesForBoxesAndScores) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 2, 4}),
{0, 0, 0.1, 0.1, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, 0.6f, 0.1, 0.7f,
0, -0.01, 0.1, 0.09f, 0, -0.01, 0.1, 0.09f, 0, 0.11, 0.1, 0.2, 0, 0.11,
0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4, 0,
0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, 0.02f, 0.2,
0.22f, 0, -0.02, 0.2, 0.19f, 0, -0.02, 0.2, 0.19f, 0, 0.21, 0.2, 0.3, 0,
0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1,
0.5, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.6f, 0.1, 0.7f,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.8, 0.95, 0.9, 0.75});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 1, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {3, 3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59767d51-6ab6-44cc-b43f-0cb7e7eb707a | cpp | tensorflow/tensorflow | sampling_kernels | tensorflow/core/kernels/image/sampling_kernels.cc | tensorflow/core/kernels/image/sampling_kernels_test.cc | #include "tensorflow/core/kernels/image/sampling_kernels.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace functor {
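// Maps a case-insensitive kernel name to its SamplingKernelType; unrecognized
// names map to SamplingKernelTypeEnd.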
SamplingKernelType SamplingKernelTypeFromString(const StringPiece str) {
const string lower_case = absl::AsciiStrToLower(str);
if (lower_case == "lanczos1") return Lanczos1Kernel;
if (lower_case == "lanczos3") return Lanczos3Kernel;
if (lower_case == "lanczos5") return Lanczos5Kernel;
if (lower_case == "gaussian") return GaussianKernel;
if (lower_case == "box") return BoxKernel;
if (lower_case == "triangle") return TriangleKernel;
if (lower_case == "keyscubic") return KeysCubicKernel;
if (lower_case == "mitchellcubic") return MitchellCubicKernel;
return SamplingKernelTypeEnd;
}
}
} | #include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace functor {
namespace {
class KernelsTest : public ::testing::Test {
protected:
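  // Evaluates the kernel at each offset in `x` and checks it against
  // `expected`; the kernels are symmetric, so -x is verified as well.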
template <typename KernelType>
void TestKernelValues(const KernelType& kernel, const std::vector<float>& x,
const std::vector<float>& expected) const {
ASSERT_EQ(x.size(), expected.size());
for (int i = 0; i < x.size(); ++i) {
constexpr float kTolerance = 1e-3;
EXPECT_NEAR(kernel(x[i]), expected[i], kTolerance);
EXPECT_NEAR(kernel(-x[i]), expected[i], kTolerance);
}
}
};
TEST_F(KernelsTest, TestKernelValues) {
TestKernelValues(CreateLanczos1Kernel(), {0.0f, 0.5f, 1.0f, 1.5},
{1.0f, 0.4052f, 0.0f, 0.0f});
TestKernelValues(CreateLanczos3Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5},
{1.0f, 0.6079f, 0.0f, -0.1351f, 0.0243f, 0.0f});
TestKernelValues(
CreateLanczos5Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5},
{1.0f, 0.6262f, 0.0f, -0.1822f, 0.0810569f, -0.0334f, 0.0077f, 0.0f});
TestKernelValues(CreateGaussianKernel(), {0.0f, 0.5f, 1.0f, 1.5},
{1.0f, 0.6065f, 0.1353f, 0.0f});
TestKernelValues(CreateBoxKernel(), {0.0f, 0.25f, 0.5f, 1.0f},
{1.0f, 1.0f, 0.5f, 0.0f});
TestKernelValues(CreateTriangleKernel(), {0.0f, 0.5f, 1.0f},
{1.0f, 0.5f, 0.0f});
TestKernelValues(CreateKeysCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
{1.0f, 0.5625f, 0.0f, -0.0625f, 0.0f});
TestKernelValues(CreateMitchellCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
{0.8889f, 0.5347f, 0.0556f, -0.0347f, 0.0f});
}
TEST(SamplingKernelTypeFromStringTest, Works) {
EXPECT_EQ(SamplingKernelTypeFromString("lanczos1"), Lanczos1Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("lanczos3"), Lanczos3Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("lanczos5"), Lanczos5Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("gaussian"), GaussianKernel);
EXPECT_EQ(SamplingKernelTypeFromString("box"), BoxKernel);
EXPECT_EQ(SamplingKernelTypeFromString("triangle"), TriangleKernel);
EXPECT_EQ(SamplingKernelTypeFromString("mitchellcubic"), MitchellCubicKernel);
EXPECT_EQ(SamplingKernelTypeFromString("keyscubic"), KeysCubicKernel);
EXPECT_EQ(SamplingKernelTypeFromString("not a kernel"),
SamplingKernelTypeEnd);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/sampling_kernels.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/sampling_kernels_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94107fc2-df0d-4834-b084-1a8bd6d86636 | cpp | tensorflow/tensorflow | resize_nearest_neighbor_op | tensorflow/core/kernels/image/resize_nearest_neighbor_op.cc | tensorflow/core/kernels/image/resize_nearest_neighbor_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/resize_nearest_neighbor_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
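// Resizes a batch of images to the requested height/width using
// nearest-neighbor sampling; the align_corners and half_pixel_centers attrs
// select the output-to-input index mapping.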
template <typename Device, typename T>
class ResizeNearestNeighborOp : public OpKernel {
public:
explicit ResizeNearestNeighborOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
OP_REQUIRES(context, st.in_height < (1 << 24) && st.in_width < (1 << 24),
errors::InvalidArgument("nearest neighbor requires max height "
"& width of 2^24"));
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>());
bool status;
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
                                                /*half_pixel_centers=*/true,
                                                /*align_corners=*/true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
                                                /*half_pixel_centers=*/true,
                                                /*align_corners=*/false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
} else {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
                                                /*half_pixel_centers=*/false,
                                                /*align_corners=*/true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
                                                /*half_pixel_centers=*/false,
                                                /*align_corners=*/false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighbor"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
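// Helper struct that maps the half_pixel_centers flag to the scaler type used
// for the output-to-input coordinate transformation.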
template <bool half_pixel_centers>
struct BoolToScaler {};
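// Half-pixel-centers mapping specialized for nearest neighbor: unlike the
// generic half-pixel scaler, no -0.5 shift is applied here, because the
// callers below immediately floorf/roundf the result.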
struct HalfPixelScalerForNN {
inline float operator()(const int x, const float scale) const {
return (static_cast<float>(x) + 0.5f) * scale;
}
};
template <>
struct BoolToScaler<true> {
typedef HalfPixelScalerForNN Scaler;
};
template <>
struct BoolToScaler<false> {
typedef LegacyScaler Scaler;
};
namespace functor {
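// CPU implementation: every output pixel copies all channels from its nearest
// input pixel.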
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighbor<CPUDevice, T, half_pixel_centers, align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
#ifdef PLATFORM_GOOGLE
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index y = 0; y < out_height; ++y) {
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
for (Eigen::Index x = 0; x < out_width; ++x) {
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(b, in_y, in_x, 0), channels, &output(b, y, x, 0));
}
}
}
#else
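    // Shards the output pixels across threads; the flat index decomposes into
    // (batch, y, x).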
auto ParallelResize = [&](Eigen::Index start, Eigen::Index end) {
for (Eigen::Index b = start; b < end; ++b) {
Eigen::Index x = b % out_width;
Eigen::Index y = (b / out_width) % out_height;
Eigen::Index bs = (b / out_width) / out_height;
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(bs, in_y, in_x, 0), channels, &output(bs, y, x, 0));
}
};
Eigen::Index N = batch_size * out_height * out_width;
const int input_bytes = channels * sizeof(T);
const int output_bytes = channels * sizeof(T);
const int compute_cycles = (Eigen::TensorOpCost::ModCost<T>() * 2 +
Eigen::TensorOpCost::DivCost<T>() * 3 +
Eigen::TensorOpCost::AddCost<T>() * 2 +
Eigen::TensorOpCost::MulCost<T>() * 2);
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
d.parallelFor(N, cost, ParallelResize);
#endif
return true;
}
};
}
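// Gradient of ResizeNearestNeighbor: scatter-adds each incoming gradient value
// into the location it was sampled from during the forward resize.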
template <typename Device, typename T>
class ResizeNearestNeighborOpGrad : public OpKernel {
public:
explicit ResizeNearestNeighborOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
const Tensor& shape_t = context->input(1);
OP_REQUIRES(context, shape_t.dims() == 1,
errors::InvalidArgument("shape_t must be 1-dimensional",
shape_t.shape().DebugString()));
OP_REQUIRES(context, shape_t.NumElements() == 2,
errors::InvalidArgument("shape_t must have two elements",
shape_t.shape().DebugString()));
auto sizes = shape_t.vec<int32>();
OP_REQUIRES(context, sizes(0) > 0 && sizes(1) > 0,
errors::InvalidArgument("shape_t's elements must be positive"));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of ResizeNearestNeighborGrad"
" is not currently available."));
}
const int64_t batch_size = input.dim_size(0);
const int64_t in_height = input.dim_size(1);
const int64_t in_width = input.dim_size(2);
const int64_t channels = input.dim_size(3);
const int64_t out_height = sizes(0);
const int64_t out_width = sizes(1);
Tensor* output = nullptr;
TensorShape shape;
OP_REQUIRES_OK(context,
TensorShape::BuildTensorShape(
{batch_size, out_height, out_width, channels}, &shape));
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output));
if (output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(input.tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(output->tensor<T, 4>());
const float height_scale =
CalculateResizeScale(out_height, in_height, align_corners_);
const float width_scale =
CalculateResizeScale(out_width, in_width, align_corners_);
bool status;
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighborGrad<Device, T,
                                                    /*half_pixel_centers=*/true,
                                                    /*align_corners=*/true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status = functor::ResizeNearestNeighborGrad<Device, T,
                                                    /*half_pixel_centers=*/true,
                                                    /*align_corners=*/false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
} else {
if (align_corners_) {
status =
functor::ResizeNearestNeighborGrad<Device, T,
                                               /*half_pixel_centers=*/false,
                                               /*align_corners=*/true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status =
functor::ResizeNearestNeighborGrad<Device, T,
                                               /*half_pixel_centers=*/false,
                                               /*align_corners=*/false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighborGrad"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace functor {
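// CPU gradient implementation: zero-initializes the output, then accumulates
// every input gradient into its nearest output location.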
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighborGrad<CPUDevice, T, half_pixel_centers,
align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
output.setZero();
for (Eigen::Index y = 0; y < in_height; ++y) {
const Eigen::Index out_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
out_height - 1);
for (Eigen::Index x = 0; x < in_width; ++x) {
const Eigen::Index out_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
out_width - 1);
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index c = 0; c < channels; ++c) {
output(b, out_y, out_x, c) += input(b, y, x, c);
}
}
}
}
return true;
}
};
}
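// CPU kernel registrations for all real number types; GPU registrations follow
// under GOOGLE_CUDA / TENSORFLOW_USE_ROCM.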
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
enum class TestDevice { kCPU, kGPU };
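// Parameterized fixture: each case runs on CPU or GPU and builds a
// ResizeNearestNeighbor node with the requested align_corners /
// half_pixel_centers attributes.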
class ResizeNearestNeighborOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeNearestNeighborOpTestBase(bool half_pixel_centers)
: align_corners_(false), half_pixel_centers_(half_pixel_centers) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
      std::unique_ptr<Device> device_gpu(
          DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeNearestNeighborOpTest : public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborOpTest() : ResizeNearestNeighborOpTestBase(false) {}
};
class ResizeNearestNeighborHalfPixelCentersOpTest
: public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborHalfPixelCentersOpTest()
: ResizeNearestNeighborOpTestBase(true) {}
};
class ResizeNearestNeighborOpAlignCornersTest
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
ResizeNearestNeighborOpAlignCornersTest() : align_corners_(true) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
      std::unique_ptr<Device> device_gpu(
          DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
};
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearest2x2AlignCornersTo1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1, 2,
1, 1, 2,
3, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 1, 2, 2,
3, 3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 3,
5, 6, 7,
9, 10, 11});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestNeighborAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
1, 2,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 1,
1, 2, 2,
1, 1, 1,
1, 2, 2,
3, 3, 3,
3, 4, 4,
5, 5, 5,
5, 6, 6,
5, 5, 5,
5, 6, 6,
7, 7, 7,
7, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest5x2To2x2) {
AddInputFromArray<float>(TensorShape({1, 2, 5, 1}),
{1, 2, 3, 4, 5, 1, 2, 3, 4, 5});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected, {2, 4, 2, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2,
3, 3, 4, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
3, 4,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2, 2,
3, 3, 4, 4, 4, 4,
3, 3, 4, 4, 4, 4,
5, 5, 6, 6, 6, 6,
7, 7, 8, 8, 8, 8,
7, 7, 8, 8, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestCpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestCpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestCpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kCPU));
#if GOOGLE_CUDA
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestGpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestGpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestGpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kGPU));
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_nearest_neighbor_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_nearest_neighbor_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a58f1bc-70e3-4496-9379-16d07d4ee87f | cpp | tensorflow/tensorflow | scale_and_translate_op | tensorflow/core/kernels/image/scale_and_translate_op.cc | tensorflow/core/kernels/image/scale_and_translate_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/scale_and_translate_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
using strings::Printf;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
namespace {
template <typename T>
inline const T& Clamp(const T& low, const T& high, const T& value) {
if (high < value) return high;
if (value < low) return low;
return value;
}
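// For every output sample, computes the clamped [start, end) span of input
// pixels that contribute to it and their kernel weights (normalized so each
// span sums to 1), given the requested scale, translation, and sampling
// kernel.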
template <typename Kernel>
Status ComputeSpansCore(OpKernelContext* context, const Kernel& kernel,
const int64_t output_size, const int64_t input_size,
const float scale, const float translate,
const bool antialias, Spans* spans) {
const float inv_scale = 1.0 / scale;
const float inv_translate = -inv_scale * translate;
const float kernel_scale = antialias ? std::max(inv_scale, 1.0f) : 1.0f;
spans->span_size = std::min(
2 * static_cast<int>(std::ceil(kernel.Radius() * kernel_scale)) + 1,
static_cast<int>(input_size));
AllocatorAttributes alloc_attr;
alloc_attr.set_on_host(true);
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_INT32, tensorflow::TensorShape({output_size}),
&spans->starts, alloc_attr));
auto starts_vec = spans->starts.vec<int32>();
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_FLOAT,
tensorflow::TensorShape({spans->span_size * output_size}),
&spans->weights, alloc_attr));
auto weights_vec = spans->weights.vec<float>();
weights_vec.setZero();
const float one_over_kernel_scale = 1.0f / kernel_scale;
int max_span_size = 0;
std::vector<float> temp_weights;
for (int x = 0; x < output_size; ++x) {
const float col_f = x + 0.5f;
const float sample_f = col_f * inv_scale + inv_translate;
if (sample_f < 0 || sample_f > input_size) {
starts_vec(x) = 0;
continue;
}
int64_t span_start =
std::ceil(sample_f - kernel.Radius() * kernel_scale - 0.5f);
int64_t span_end =
std::floor(sample_f + kernel.Radius() * kernel_scale - 0.5f);
span_start = Clamp(static_cast<int64_t>(0), input_size - 1, span_start);
span_end = Clamp(static_cast<int64_t>(0), input_size - 1, span_end) + 1;
const int this_span_size = span_end - span_start;
if (this_span_size > spans->span_size) {
return errors::Internal(Printf("Span is too large: %d vs %d.",
this_span_size, spans->span_size));
}
float total_weight_sum = 0.0f;
temp_weights.clear();
for (int source = span_start; source < span_end; ++source) {
float kernel_pos = static_cast<float>(source) + 0.5f - sample_f;
float weight = kernel(std::abs(kernel_pos * one_over_kernel_scale));
total_weight_sum += weight;
temp_weights.push_back(weight);
}
max_span_size = std::max(max_span_size, this_span_size);
if (std::abs(total_weight_sum) >=
1000.0f * std::numeric_limits<float>::min()) {
float one_over_total_weight_sum = 1.0f / total_weight_sum;
int out_index = spans->span_size * x;
for (float weight : temp_weights) {
weights_vec(out_index) = weight * one_over_total_weight_sum;
++out_index;
}
}
starts_vec(x) = span_start;
}
return absl::OkStatus();
}
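// Inverts forward spans into gradient spans: for every forward *input* pixel,
// collects the forward output pixels (and weights) that it contributed to.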
Status ComputeGradSpansCore(OpKernelContext* context, const Spans& spans,
const int64_t forward_output_size,
const int64_t forward_input_size,
Spans* grad_spans) {
struct GradComponent {
int index;
float weight;
};
std::vector<std::vector<GradComponent>> grad_components(forward_input_size);
auto weights_vec = spans.weights.vec<float>();
auto starts_vec = spans.starts.vec<int32>();
for (int output_index = 0; output_index < forward_output_size;
++output_index) {
int input_index = starts_vec(output_index);
for (int j = 0; j < spans.span_size; ++j, ++input_index) {
const float weight = weights_vec(output_index * spans.span_size + j);
if (weight != 0.0f && input_index < forward_input_size) {
grad_components[input_index].push_back(
GradComponent{output_index, weight});
}
}
}
int max_size = 0;
for (std::vector<GradComponent>& gc : grad_components) {
if (!gc.empty()) {
std::sort(gc.begin(), gc.end(),
[](const GradComponent& x1, const GradComponent& x2) {
return x1.index < x2.index;
});
max_size = std::max(gc.back().index - gc.front().index + 1, max_size);
}
}
grad_spans->span_size = max_size;
AllocatorAttributes alloc_attr;
alloc_attr.set_on_host(true);
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_INT32, tensorflow::TensorShape({forward_input_size}),
&grad_spans->starts, alloc_attr));
auto grad_starts_vec = grad_spans->starts.vec<int32>();
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_FLOAT,
tensorflow::TensorShape({grad_spans->span_size * forward_input_size}),
&grad_spans->weights, alloc_attr));
auto grad_weights_vec = grad_spans->weights.vec<float>();
grad_weights_vec.setZero();
for (int input_index = 0; input_index < forward_input_size; ++input_index) {
if (!grad_components[input_index].empty()) {
const int start_span = grad_components[input_index].front().index;
grad_starts_vec(input_index) = start_span;
for (const GradComponent& gc : grad_components[input_index]) {
grad_weights_vec(input_index * grad_spans->span_size + gc.index -
start_span) += gc.weight;
}
} else {
grad_starts_vec(input_index) = 0;
}
}
return absl::OkStatus();
}
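// Dispatches to ComputeSpansCore with the concrete kernel functor selected by
// kernel_type.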
Status ComputeSpans(OpKernelContext* context,
const functor::SamplingKernelType kernel_type,
const int64_t output_size, const int64_t input_size,
const float scale, const float translate,
const bool antialias, Spans* spans) {
switch (kernel_type) {
case functor::Lanczos1Kernel: {
return ComputeSpansCore(context, CreateLanczos1Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::Lanczos3Kernel: {
return ComputeSpansCore(context, CreateLanczos3Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::Lanczos5Kernel: {
return ComputeSpansCore(context, CreateLanczos5Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::GaussianKernel: {
return ComputeSpansCore(context, CreateGaussianKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::BoxKernel: {
return ComputeSpansCore(context, CreateBoxKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::TriangleKernel: {
return ComputeSpansCore(context, CreateTriangleKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::KeysCubicKernel: {
return ComputeSpansCore(context, CreateKeysCubicKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::MitchellCubicKernel: {
return ComputeSpansCore(context, CreateMitchellCubicKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
default:
return errors::InvalidArgument(Printf("Unrecognized kernel type: %d",
static_cast<int>(kernel_type)));
}
return absl::OkStatus();
}
Status ComputeGradSpans(OpKernelContext* context,
const functor::SamplingKernelType kernel_type,
const int64_t forward_output_size,
const int64_t forward_input_size, const float scale,
const float translate, const bool antialias,
Spans* grad_spans) {
Spans spans;
TF_RETURN_IF_ERROR(ComputeSpans(context, kernel_type, forward_output_size,
forward_input_size, scale, translate,
antialias, &spans));
return ComputeGradSpansCore(context, spans, forward_output_size,
forward_input_size, grad_spans);
}
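// Reads a 1-D, two-element float input (scale or translation) into two
// scalars, validating its shape.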
void GetValues(OpKernelContext* context, int input_index, float* v_1,
float* v_2) {
const Tensor& t = context->input(input_index);
OP_REQUIRES(context, t.dims() == 1,
errors::InvalidArgument("t must be 1-dimensional",
t.shape().DebugString()));
OP_REQUIRES(context, t.NumElements() == 2,
errors::InvalidArgument("t must have two elements",
t.shape().DebugString()));
auto data_vec = t.flat<float>().data();
*v_1 = data_vec[0];
*v_2 = data_vec[1];
}
template <typename Device, typename T>
class ScaleAndTranslateOp : public OpKernel {
public:
explicit ScaleAndTranslateOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("antialias", &antialias_));
string kernel_type_str;
OP_REQUIRES_OK(context, context->GetAttr("kernel_type", &kernel_type_str));
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
OP_REQUIRES(context, kernel_type_ != functor::SamplingKernelTypeEnd,
errors::InvalidArgument("Unrecognized kernel type: " +
kernel_type_str));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
const Tensor& output_shape_t = context->input(1);
OP_REQUIRES(context, output_shape_t.dims() == 1,
errors::InvalidArgument("output_shape_t must be 1-dimensional",
output_shape_t.shape().DebugString()));
OP_REQUIRES(context, output_shape_t.NumElements() == 2,
errors::InvalidArgument("output_shape_t must have two elements",
output_shape_t.shape().DebugString()));
auto output_shape_vec = output_shape_t.vec<int32>();
const int64_t output_height = internal::SubtleMustCopy(output_shape_vec(0));
const int64_t output_width = internal::SubtleMustCopy(output_shape_vec(1));
OP_REQUIRES(
context,
FastBoundsCheck(input.dim_size(1), std::numeric_limits<int32>::max()) &&
FastBoundsCheck(input.dim_size(2),
std::numeric_limits<int32>::max()),
errors::InvalidArgument("input sizes must be between 0 and max int32"));
const int64_t batch_size = input.dim_size(0);
const int64_t input_height = input.dim_size(1);
const int64_t input_width = input.dim_size(2);
const int64_t channels = input.dim_size(3);
OP_REQUIRES(context, output_height > 0 && output_width > 0,
errors::InvalidArgument("output dimensions must be positive"));
OP_REQUIRES(
context, channels > 0,
errors::InvalidArgument("image must have at least one channel"));
OP_REQUIRES(
context, input.dim_size(1) > 0 && input.dim_size(2) > 0,
errors::InvalidArgument("input image must be of non-zero size"));
float row_scale, col_scale;
GetValues(context, 2, &row_scale, &col_scale);
OP_REQUIRES(context, row_scale > 0 && col_scale > 0,
errors::InvalidArgument("Scale must be greater than zero."));
float row_translation, col_translation;
GetValues(context, 3, &row_translation, &col_translation);
Tensor* output = nullptr;
TensorShape output_shape;
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(input.dim_size(0)));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(output_height));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(output_width));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(input.dim_size(3)));
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
if (!context->status().ok()) return;
if (output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(input.tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = output->tensor<float, 4>();
functor::Spans col_spans;
OP_REQUIRES_OK(
context,
ComputeSpans(context, kernel_type_, output_width, input_width,
col_scale, col_translation, antialias_, &col_spans));
functor::Spans row_spans;
OP_REQUIRES_OK(
context,
ComputeSpans(context, kernel_type_, output_height, input_height,
row_scale, row_translation, antialias_, &row_spans));
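    // The intermediate buffer holds the result of the row (vertical) pass,
    // shape [batch, output_height, input_width, channels]; the column pass
    // then produces the final output.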
Tensor intermediate_t;
OP_REQUIRES_OK(
context, context->allocate_temp(DT_FLOAT,
TensorShape({batch_size, output_height,
input_width, channels}),
&intermediate_t));
TTypes<float, 4>::Tensor intermediate_data =
intermediate_t.tensor<float, 4>();
const functor::Spans& const_row_spans = row_spans;
typename TTypes<int32, 1>::ConstTensor row_starts(
const_row_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor row_weights(
const_row_spans.weights.tensor<float, 1>());
const functor::Spans& const_col_spans = col_spans;
typename TTypes<int32, 1>::ConstTensor col_starts(
const_col_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor col_weights(
const_col_spans.weights.tensor<float, 1>());
functor::GatherSpans<Device, T>()(
context, context->eigen_device<Device>(), row_spans.span_size,
row_starts, row_weights, col_spans.span_size, col_starts, col_weights,
image_data, intermediate_data, output_data);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
template <typename Device, typename T>
class ScaleAndTranslateGradOp : public OpKernel {
public:
explicit ScaleAndTranslateGradOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("antialias", &antialias_));
string kernel_type_str;
OP_REQUIRES_OK(context, context->GetAttr("kernel_type", &kernel_type_str));
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
OP_REQUIRES(context, kernel_type_ != functor::SamplingKernelTypeEnd,
errors::InvalidArgument("Unrecognized kernel type: " +
kernel_type_str));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& original_image = context->input(1);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input_grad must be 4-dimensional",
input.shape().DebugString()));
OP_REQUIRES(context, input.dtype() == DT_FLOAT,
errors::InvalidArgument("input_grad must be of type float",
DataTypeString(input.dtype())));
OP_REQUIRES(context, original_image.dims() == 4,
errors::InvalidArgument("original_image must be 4-dimensional",
original_image.shape().DebugString()));
const int64_t batch_size = input.dim_size(0);
const int64_t channels = input.dim_size(3);
const int64_t forward_input_height = original_image.dim_size(1);
const int64_t forward_input_width = original_image.dim_size(2);
OP_REQUIRES(context,
FastBoundsCheck(forward_input_height,
std::numeric_limits<int32>::max()) &&
FastBoundsCheck(forward_input_width,
std::numeric_limits<int32>::max()),
errors::InvalidArgument(
"original sizes must be between 0 and max int32"));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
0,
TensorShape({batch_size, forward_input_height,
forward_input_width, channels}),
&output));
float row_scale, col_scale;
GetValues(context, 2, &row_scale, &col_scale);
OP_REQUIRES(context, row_scale > 0 && col_scale > 0,
errors::InvalidArgument("Scale must be greater than zero."));
float row_translation, col_translation;
GetValues(context, 3, &row_translation, &col_translation);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad = input.tensor<float, 4>();
typename TTypes<T, 4>::Tensor output_grad(output->tensor<T, 4>());
const int64_t forward_output_height = input_grad.dimension(1);
const int64_t forward_output_width = input_grad.dimension(2);
functor::Spans col_spans;
OP_REQUIRES_OK(context,
ComputeGradSpans(context, kernel_type_, forward_output_width,
forward_input_width, col_scale,
col_translation, antialias_, &col_spans));
functor::Spans row_spans;
OP_REQUIRES_OK(
context, ComputeGradSpans(context, kernel_type_, forward_output_height,
forward_input_height, row_scale,
row_translation, antialias_, &row_spans));
Tensor intermediate_t;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT,
TensorShape({batch_size, forward_input_height,
forward_output_width, channels}),
&intermediate_t));
TTypes<float, 4>::Tensor intermediate_data =
intermediate_t.tensor<float, 4>();
const functor::Spans& const_row_spans = row_spans;
typename TTypes<int32, 1>::ConstTensor row_starts =
const_row_spans.starts.tensor<int32, 1>();
typename TTypes<float, 1>::ConstTensor row_weights(
const_row_spans.weights.tensor<float, 1>());
const functor::Spans& const_col_spans = col_spans;
typename TTypes<int32, 1>::ConstTensor col_starts(
const_col_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor col_weights(
const_col_spans.weights.tensor<float, 1>());
functor::GatherSpans<Device, T>()(
context, context->eigen_device<Device>(), row_spans.span_size,
row_starts, row_weights, col_spans.span_size, col_starts, col_weights,
input_grad, intermediate_data, output_grad);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
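// Horizontal resampling pass: for each row, every output pixel is the
// weighted sum of the input pixels in its column span. Rows are distributed
// across the thread pool.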
template <typename T>
void GatherColumns(OpKernelContext* context, int span_size, const int32* starts,
const float* weights, const T* image,
const int64_t input_height, const int64_t input_width,
const int64_t output_height, const int64_t output_width,
const int channels, float* output) {
const int64_t in_row_size = input_width * channels;
const int64_t out_row_size = output_width * channels;
auto ParallelGatherColumns = [&](int64_t start, int64_t end) {
for (int y = start; y < end; ++y) {
const T* input_row_start = image + in_row_size * y;
float* out_pix = output + out_row_size * y;
for (int x = 0; x < output_width; ++x, out_pix += channels) {
const T* in_pix = input_row_start + starts[x] * channels;
const float* weights_start = weights + x * span_size;
const int real_span_size =
std::min(starts[x] + span_size, static_cast<int>(input_width)) -
starts[x];
const float* weights_end = weights_start + real_span_size;
for (int c = 0; c < channels; ++c) {
out_pix[c] = 0.0f;
}
for (const float* weight_ptr = weights_start; weight_ptr != weights_end;
++weight_ptr) {
float w = *weight_ptr;
for (int c = 0; c < channels; ++c) {
out_pix[c] += w * static_cast<float>(in_pix[c]);
}
in_pix += channels;
}
}
}
};
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
const int64_t block_size = 1;
worker_threads.workers->ParallelFor(
output_height,
tsl::thread::ThreadPool::SchedulingParams(
tsl::thread::ThreadPool::SchedulingStrategy::kFixedBlockSize,
std::nullopt, block_size),
ParallelGatherColumns);
}
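// out_vec += weight * in_vec, element-wise over vec_len entries.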
template <typename T>
inline void AddScaledVector(const T* in_vec, int vec_len, float weight,
float* out_vec) {
float* out_vec_end = out_vec + vec_len;
for (; out_vec != out_vec_end; ++out_vec, ++in_vec) {
*out_vec += weight * static_cast<float>(*in_vec);
}
}
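// Vertical resampling pass: each output row is the weighted sum of the input
// rows in its span. Output rows are distributed across the thread pool.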
template <typename T>
void GatherRows(OpKernelContext* context, int span_size, const int32* starts,
const float* weights, const T* image,
const int64_t input_height, const int64_t input_width,
const int64_t output_height, const int64_t output_width,
const int channels, float* output) {
const int64_t in_row_size = input_width * channels;
const int64_t out_row_size = output_width * channels;
auto ParallelGatherRows = [&](int64_t start, int64_t end) {
for (int y = start; y < end; ++y) {
float* out_row_data = output + out_row_size * y;
std::fill(out_row_data, out_row_data + out_row_size, 0.0f);
int in_row = starts[y];
const T* in_row_data = image + in_row_size * in_row;
const float* weights_start = weights + y * span_size;
const int real_span_size =
std::min(starts[y] + span_size, static_cast<int>(input_height)) -
starts[y];
const float* const weights_end = weights_start + real_span_size;
for (const float* weight_it = weights_start; weight_it != weights_end;
++weight_it) {
AddScaledVector(in_row_data, in_row_size, *weight_it, out_row_data);
in_row_data += in_row_size;
}
}
};
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
const int64_t block_size = 1;
worker_threads.workers->ParallelFor(
output_height,
tsl::thread::ThreadPool::SchedulingParams(
tsl::thread::ThreadPool::SchedulingStrategy::kFixedBlockSize,
std::nullopt, block_size),
ParallelGatherRows);
}
}
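// Two-pass separable resampling on CPU: rows first, into the intermediate
// buffer of shape [batch, out_height, in_width, channels], then columns into
// the final output.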
template <typename T>
struct GatherSpans<CPUDevice, T> {
void operator()(OpKernelContext* context, const CPUDevice& d,
int row_span_size,
typename TTypes<int32, 1>::ConstTensor row_starts,
typename TTypes<float, 1>::ConstTensor row_weights,
int col_span_size,
typename TTypes<int32, 1>::ConstTensor col_starts,
typename TTypes<float, 1>::ConstTensor col_weights,
typename TTypes<T, 4>::ConstTensor images,
typename TTypes<float, 4>::Tensor intermediate_buffer,
typename TTypes<float, 4>::Tensor resized_images) {
const int batch_size = images.dimension(0);
const int64_t input_height = images.dimension(1);
const int64_t input_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t output_height = resized_images.dimension(1);
const int64_t output_width = resized_images.dimension(2);
const int64_t input_pix_per_batch = input_width * input_height * channels;
const int64_t intermediate_pix_per_batch =
input_width * output_height * channels;
const int64_t output_pix_per_batch =
output_width * output_height * channels;
float* intermediate_ptr = intermediate_buffer.data();
const T* image_ptr = images.data();
float* out_ptr = resized_images.data();
for (int b = 0; b < batch_size; ++b, image_ptr += input_pix_per_batch,
intermediate_ptr += intermediate_pix_per_batch,
out_ptr += output_pix_per_batch) {
GatherRows(context, row_span_size, row_starts.data(), row_weights.data(),
image_ptr, input_height, input_width, output_height,
input_width, channels, intermediate_ptr);
GatherColumns(context, col_span_size, col_starts.data(),
col_weights.data(), intermediate_ptr, output_height,
input_width, output_height, output_width, channels,
out_ptr);
}
}
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ScaleAndTranslate") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size") \
.HostMemory("scale") \
.HostMemory("translation"), \
ScaleAndTranslateOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ScaleAndTranslateGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("scale") \
.HostMemory("translation"), \
ScaleAndTranslateGradOp<CPUDevice, T>);
TF_CALL_float(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
}
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
using Eigen::Vector2f;
class DynamicKernel {
public:
virtual ~DynamicKernel() {}
virtual float Value(const float x) const = 0;
virtual float Radius() const = 0;
};
template <typename KernelType>
class TypedDynamicKernel : public DynamicKernel {
public:
explicit TypedDynamicKernel(const KernelType& kernel) : kernel_(kernel) {}
float Value(const float x) const override { return kernel_(x); }
float Radius() const override { return kernel_.Radius(); }
const KernelType kernel_;
};
template <typename KernelType>
std::unique_ptr<const DynamicKernel> CreateKernel(const KernelType& kernel) {
return std::make_unique<TypedDynamicKernel<KernelType>>(kernel);
}
std::unique_ptr<const DynamicKernel> Create(
functor::SamplingKernelType kernel_type) {
switch (kernel_type) {
case functor::Lanczos1Kernel:
return CreateKernel(functor::CreateLanczos1Kernel());
case functor::Lanczos3Kernel:
return CreateKernel(functor::CreateLanczos3Kernel());
case functor::Lanczos5Kernel:
return CreateKernel(functor::CreateLanczos5Kernel());
case functor::GaussianKernel:
return CreateKernel(functor::CreateGaussianKernel());
case functor::BoxKernel:
return CreateKernel(functor::CreateBoxKernel());
case functor::TriangleKernel:
return CreateKernel(functor::CreateTriangleKernel());
case functor::KeysCubicKernel:
return CreateKernel(functor::CreateKeysCubicKernel());
case functor::MitchellCubicKernel:
return CreateKernel(functor::CreateMitchellCubicKernel());
default:
LOG(FATAL) << "Unknown kernel type.";
return nullptr;
}
}
template <typename T>
inline const T& Clamp(const T& low, const T& high, const T& value) {
return std::min(high, std::max(low, value));
}
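// Reference implementation of a single kernel-weighted sample at sample_f,
// used to cross-check the output of the ScaleAndTranslate kernel.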
void Sample(const DynamicKernel& kernel, const bool antialias,
TTypes<float, 4>::Tensor images, const int batch,
const Vector2f& scale, const Vector2f& sample_f, float* dest) {
const Vector2f kernel_scale(antialias ? std::max(scale.x(), 1.0f) : 1.0,
antialias ? std::max(scale.y(), 1.0f) : 1.0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t y_span_start = Clamp(
static_cast<int64_t>(0), in_height - 1,
static_cast<int64_t>(
std::ceil(sample_f.y() - kernel.Radius() * kernel_scale.y() - 0.5f)));
const int64_t y_span_end =
Clamp(static_cast<int64_t>(0), in_height - 1,
static_cast<int64_t>(std::floor(
sample_f.y() + kernel.Radius() * kernel_scale.y() - 0.5f))) +
1;
const int64_t x_span_start = Clamp(
static_cast<int64_t>(0), in_width - 1,
static_cast<int64_t>(
std::ceil(sample_f.x() - kernel.Radius() * kernel_scale.x() - 0.5f)));
const int64_t x_span_end =
Clamp(static_cast<int64_t>(0), in_width - 1,
static_cast<int64_t>(std::floor(
sample_f.x() + kernel.Radius() * kernel_scale.x() - 0.5f))) +
1;
std::fill(dest, dest + channels, 0.0f);
if (sample_f.x() < 0.0f || sample_f.y() < 0.0f || sample_f.x() > in_width ||
sample_f.y() > in_height) {
return;
}
const Vector2f one_over_kernel_scale(1.0f / kernel_scale.x(),
1.0f / kernel_scale.y());
float total_weight = 0.0f;
for (int64_t y = y_span_start; y < y_span_end; ++y) {
float y_kernel_pos = static_cast<float>(y) + 0.5f - sample_f.y();
float y_weight = kernel.Value(y_kernel_pos * one_over_kernel_scale.y());
for (int64_t x = x_span_start; x < x_span_end; ++x) {
float x_kernel_pos = static_cast<float>(x) + 0.5f - sample_f.x();
float x_weight = kernel.Value(x_kernel_pos * one_over_kernel_scale.x());
float kernel_weight = y_weight * x_weight;
total_weight += kernel_weight;
for (int c = 0; c < channels; ++c) {
dest[c] += static_cast<float>(images(batch, y, x, c)) * kernel_weight;
}
}
}
if (std::abs(total_weight) >= 1000.0f * std::numeric_limits<float>::min()) {
CHECK_NE(total_weight, 0.0f) << y_span_start << "," << y_span_end << " "
<< x_span_start << "," << x_span_end;
for (int c = 0; c < channels; ++c) {
dest[c] /= total_weight;
}
}
}
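// Brute-force baseline: maps every output pixel back into input space via the
// inverted scale/translation and samples it directly; the op under test must
// match this within the test tolerance.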
void ScaleAndTranslateBaseline(const DynamicKernel& kernel,
const bool antialias,
TTypes<float, 4>::Tensor images,
const Vector2f& orig_scale,
const Vector2f& orig_translate,
TTypes<float, 4>::Tensor output) {
const Vector2f scale(1.0f / orig_scale[0], 1.0f / orig_scale[1]);
const Vector2f translate(-orig_translate[0] / orig_scale[0],
-orig_translate[1] / orig_scale[1]);
const int batch = images.dimension(0);
const int channels = images.dimension(3);
ASSERT_EQ(batch, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
for (int b = 0; b < batch; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float out_y_f = static_cast<float>(y) + 0.5;
const float in_y_f = out_y_f * scale.y() + translate.y();
for (int64_t x = 0; x < out_width; ++x) {
const float out_x_f = static_cast<float>(x) + 0.5;
const float in_x_f = out_x_f * scale.x() + translate.x();
if (in_x_f < 0.0f || in_y_f < 0.0f || in_x_f > in_width ||
in_y_f > in_height) {
std::fill(&output(b, y, x, 0), &output(b, y, x + 1, 0), 0.0f);
} else {
Sample(kernel, antialias, images, b, scale, Vector2f(in_x_f, in_y_f),
&output(b, y, x, 0));
}
}
}
}
}
class ScaleAndTranslateOpTest : public OpsTestBase {
protected:
void CreateOp(const string& kernel_type_str, const bool antialias) {
TF_EXPECT_OK(NodeDefBuilder("scale_and_translate_op", "ScaleAndTranslate")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("kernel_type", kernel_type_str)
.Attr("antialias", antialias)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
antialias_ = antialias;
}
void SetCheckerboardImageInput(int batch_size, int num_row_squares,
int num_col_squares, int square_size,
int num_channels) {
inputs_.clear();
std::vector<float> data;
const int64_t row_size = num_col_squares * square_size * num_channels;
const int64_t image_size = num_row_squares * square_size * row_size;
data.resize(batch_size * image_size);
random::PhiloxRandom philox(42);
random::SimplePhilox rnd(&philox);
std::vector<float> col(num_channels);
for (int b = 0; b < batch_size; ++b) {
for (int y = 0; y < num_row_squares; ++y) {
for (int x = 0; x < num_col_squares; ++x) {
for (int n = 0; n < num_channels; ++n) {
col[n] = rnd.RandFloat();
}
for (int r = y * square_size; r < (y + 1) * square_size; ++r) {
auto it = data.begin() + b * image_size + r * row_size +
x * square_size * num_channels;
for (int n = 0; n < square_size; ++n) {
for (int chan = 0; chan < num_channels; ++chan, ++it) {
*it = col[chan] * 255.0;
}
}
}
}
}
}
AddInputFromArray<float>(
TensorShape({batch_size, num_row_squares * square_size,
num_col_squares * square_size, num_channels}),
data);
}
void RunTest(int output_image_height, int output_image_width,
const Vector2f& scale, const Vector2f& translate) {
AddInputFromArray<int32>(TensorShape({2}),
{output_image_height, output_image_width});
AddInputFromArray<float>(TensorShape({2}), {scale[1], scale[0]});
AddInputFromArray<float>(TensorShape({2}), {translate[1], translate[0]});
Status s = RunOpKernel();
const int batch_size = GetOutput(0)->dim_size(0);
const int channels = GetOutput(0)->dim_size(3);
Tensor expected(allocator(), DT_FLOAT,
TensorShape({batch_size, output_image_height,
output_image_width, channels}));
std::unique_ptr<const DynamicKernel> kernel = Create(kernel_type_);
ScaleAndTranslateBaseline(*kernel, antialias_,
mutable_input(0)->tensor<float, 4>(), scale,
translate, expected.tensor<float, 4>());
constexpr double kAbs = 1e-2f;
test::ExpectTensorNear<float>(expected, *GetOutput(0), kAbs);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
TEST_F(ScaleAndTranslateOpTest, IdentityTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize;
const Vector2f kScale(1.0f, 1.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, UpsampleTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize * 2;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize * 2;
const Vector2f kScale(2.0f, 2.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, DownsampleTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize / 2;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize / 2;
const Vector2f kScale(0.5f, 0.5f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, AntiAliasedDownsampleToASinglePixelTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 1;
constexpr int kOutputImageWidth = 1;
const Vector2f kScale(1.0f / (kNumRowSquares * kSquareSize),
1.0f / (kNumColSquares * kSquareSize));
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntiAliasedDownsampleToASinglePixelTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 1;
constexpr int kOutputImageWidth = 1;
const Vector2f kScale(1.0f / (kNumRowSquares * kSquareSize),
1.0f / (kNumColSquares * kSquareSize));
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, UsampleFromASinglePixelTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 1;
constexpr int64_t kNumColSquares = 1;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 10;
constexpr int kOutputImageWidth = 17;
const Vector2f kScale(17.0f, 10.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntialiasedUsampleFromASinglePixelTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 1;
constexpr int64_t kNumColSquares = 1;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 10;
constexpr int kOutputImageWidth = 17;
const Vector2f kScale(17.0f, 10.0f);
const Vector2f kTranslate(0.0f, 0.0f);
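  // When upsampling from a single pixel, antialiasing cannot change the
  // result, so the antialiased baseline is used for comparison.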
antialias_ = true;
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, AntialiasedScaleAndTranslationTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 11;
constexpr int64_t kNumColSquares = 7;
constexpr int64_t kSquareSize = 5;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 49;
constexpr int kOutputImageWidth = 51;
const Vector2f kScale(1.25f, 0.6f);
const Vector2f kTranslate(4.1f, -3.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntialiasedScaleAndTranslationTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 11;
constexpr int64_t kNumColSquares = 7;
constexpr int64_t kSquareSize = 5;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 49;
constexpr int kOutputImageWidth = 51;
const Vector2f kScale(1.25f, 0.6f);
const Vector2f kTranslate(4.1f, -3.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, TestKernelTypes) {
const std::vector<string> kKernelTypes = {
"lanczos1", "lanczos3", "lanczos5", "box",
"triangle", "keyscubic", "mitchellcubic"};
for (const string& kernel_type : kKernelTypes) {
CreateOp(kernel_type, true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 10;
constexpr int64_t kNumColSquares = 11;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 9;
constexpr int kOutputImageWidth = 11;
const Vector2f kScale(1.9f, 1.9f);
const Vector2f kTranslate(0.3f, 2.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/scale_and_translate_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/scale_and_translate_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c06ee53d-22e7-4329-98c3-c4268f794d6d | cpp | tensorflow/tensorflow | resize_area_op | tensorflow/core/kernels/image/resize_area_op.cc | tensorflow/core/kernels/image/resize_area_op_test.cc | #define EIGEN_USE_THREADS
#include <algorithm>
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace {
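// Per-output-column interpolation state: the [start, end) range of
// contributing input columns, the fractional weights of the first and last
// columns in that range, and whether the range needs clamping to the image
// width.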
struct CachedInterpolation {
int64_t start;
int64_t end;
float start_scale;
float end_minus_one_scale;
bool needs_bounding;
};
}
template <typename Device, typename T>
class ResizeAreaOp : public OpKernel {
public:
explicit ResizeAreaOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
}
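// Fast path for 3-channel images: accumulates the area-weighted sum for one
// output pixel's patch, keeping the three channel sums in local variables.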
template <bool NeedsXBounding>
static void ComputePatchSumOf3Channels(float scale,
const ImageResizerState& st,
const std::vector<const T*>& y_ptrs,
const std::vector<float>& y_scales,
const CachedInterpolation& x_interp,
float* output_ptr) {
#define BOUND_IF_NEEDED(x, y) (NeedsXBounding ? Bound(x, y) : (x))
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
for (int i = 0; i < y_ptrs.size(); ++i) {
const T* ptr = y_ptrs[i];
float scale_x = x_interp.start_scale;
int64_t offset = 3 * BOUND_IF_NEEDED(x_interp.start, st.in_width);
float sum_y_0 = static_cast<float>(ptr[offset + 0]) * scale_x;
float sum_y_1 = static_cast<float>(ptr[offset + 1]) * scale_x;
float sum_y_2 = static_cast<float>(ptr[offset + 2]) * scale_x;
if (x_interp.start + 1 != x_interp.end) {
for (int64_t x = x_interp.start + 1; x < x_interp.end - 1; ++x) {
int64_t offset = 3 * BOUND_IF_NEEDED(x, st.in_width);
sum_y_0 += static_cast<float>(ptr[offset + 0]);
sum_y_1 += static_cast<float>(ptr[offset + 1]);
sum_y_2 += static_cast<float>(ptr[offset + 2]);
}
scale_x = x_interp.end_minus_one_scale;
offset = 3 * BOUND_IF_NEEDED(x_interp.end - 1, st.in_width);
sum_y_0 += static_cast<float>(ptr[offset + 0]) * scale_x;
sum_y_1 += static_cast<float>(ptr[offset + 1]) * scale_x;
sum_y_2 += static_cast<float>(ptr[offset + 2]) * scale_x;
}
sum_0 += sum_y_0 * y_scales[i];
sum_1 += sum_y_1 * y_scales[i];
sum_2 += sum_y_2 * y_scales[i];
}
output_ptr[0] = sum_0 * scale;
output_ptr[1] = sum_1 * scale;
output_ptr[2] = sum_2 * scale;
#undef BOUND_IF_NEEDED
}
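// Generic version of the patch accumulation above for an arbitrary number of
// channels.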
template <bool NeedsXBounding>
static void ComputePatchSum(float scale, const ImageResizerState& st,
const std::vector<const T*>& y_ptrs,
const std::vector<float>& y_scales,
const CachedInterpolation& x_interp,
float* output_ptr) {
#define BOUND_IF_NEEDED(x, y) (NeedsXBounding ? Bound(x, y) : (x))
const auto num_channels = st.channels;
for (int64_t c = 0; c < num_channels; ++c) {
float sum = 0;
for (int i = 0; i < y_ptrs.size(); ++i) {
const T* ptr = y_ptrs[i];
float scale_x = x_interp.start_scale;
float sum_y = static_cast<float>(
ptr[num_channels *
BOUND_IF_NEEDED(x_interp.start, st.in_width) +
c]) *
scale_x;
if (x_interp.start + 1 != x_interp.end) {
for (int64_t x = x_interp.start + 1; x < x_interp.end - 1; ++x) {
sum_y += static_cast<float>(
ptr[num_channels * BOUND_IF_NEEDED(x, st.in_width) + c]);
}
scale_x = x_interp.end_minus_one_scale;
sum_y += static_cast<float>(
ptr[num_channels *
BOUND_IF_NEEDED(x_interp.end - 1, st.in_width) +
c]) *
scale_x;
}
sum += sum_y * y_scales[i];
}
output_ptr[c] = sum * scale;
}
#undef BOUND_IF_NEEDED
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, false);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
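    // Precompute, for every output column, which input columns it covers and
    // the partial weights of the boundary columns; the same spans are reused
    // for every row and batch element.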
std::vector<CachedInterpolation> x_interps(st.out_width);
for (int64_t x = 0; x < st.out_width; ++x) {
auto& x_interp = x_interps[x];
const float in_x = x * st.width_scale;
const float in_x1 = (x + 1) * st.width_scale;
int64_t v = std::floor(in_x);
x_interp.start = v;
x_interp.start_scale =
v < in_x ? (v + 1 > in_x1 ? st.width_scale : v + 1 - in_x)
: (v + 1 > in_x1 ? in_x1 - v : 1.0);
v = std::ceil(in_x1);
x_interp.end = v;
v = x_interp.end - 1;
x_interp.end_minus_one_scale =
v < in_x ? (v + 1 > in_x1 ? st.width_scale : v + 1 - in_x)
: (v + 1 > in_x1 ? in_x1 - v : 1.0);
x_interp.needs_bounding =
Bound(x_interp.start, st.in_width) != x_interp.start ||
Bound(x_interp.end - 1, st.in_width) != (x_interp.end - 1);
}
if (st.channels == 3) {
ComputeLoop<3>(st, x_interps, input_data);
} else {
ComputeLoop<-1>(st, x_interps, input_data);
}
}
template <int64_t kKnownNumChannels>
void ComputeLoop(const ImageResizerState& st,
const std::vector<CachedInterpolation>& x_interps,
typename TTypes<T, 4>::ConstTensor input_data) {
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
const T* const input_ptr = input_data.data();
std::vector<float> y_scales;
std::vector<const T*> y_ptrs;
float scale = 1.0 / (st.height_scale * st.width_scale);
float* output_ptr = output_data.data();
for (int64_t b = 0; b < st.batch_size; ++b) {
for (int64_t y = 0; y < st.out_height; ++y) {
const float in_y = y * st.height_scale;
const float in_y1 = (y + 1) * st.height_scale;
const int64_t y_start = std::floor(in_y);
const int64_t y_end = std::ceil(in_y1);
y_scales.clear();
y_ptrs.clear();
for (int64_t i = y_start; i < y_end; ++i) {
float scale_y;
if (i < in_y) {
scale_y = (i + 1 > in_y1 ? st.height_scale : i + 1 - in_y);
} else {
scale_y = (i + 1 > in_y1 ? in_y1 - i : 1.0);
}
y_scales.push_back(scale_y);
y_ptrs.push_back(
input_ptr + (b * st.in_height * st.in_width * st.channels +
Bound(i, st.in_height) * st.in_width * st.channels));
}
if (kKnownNumChannels == 3) {
for (int64_t x = 0; x < st.out_width; ++x) {
const CachedInterpolation& x_interp = x_interps[x];
if (x_interp.needs_bounding) {
ComputePatchSumOf3Channels<true>(scale, st, y_ptrs, y_scales,
x_interp, output_ptr);
} else {
ComputePatchSumOf3Channels<false>(scale, st, y_ptrs, y_scales,
x_interp, output_ptr);
}
output_ptr += 3;
}
} else {
for (int64_t x = 0; x < st.out_width; ++x) {
const CachedInterpolation& x_interp = x_interps[x];
if (x_interp.needs_bounding) {
ComputePatchSum<true>(scale, st, y_ptrs, y_scales, x_interp,
output_ptr);
} else {
ComputePatchSum<false>(scale, st, y_ptrs, y_scales, x_interp,
output_ptr);
}
output_ptr += st.channels;
}
}
}
}
}
private:
static EIGEN_ALWAYS_INLINE int64_t Bound(int64_t val, int64_t limit) {
return std::min(limit - 1, std::max(int64_t{0}, val));
}
bool align_corners_;
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeArea") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeAreaOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
} | #include <cmath>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class ResizeAreaOpTest : public OpsTestBase {
protected:
ResizeAreaOpTest() = default;
void CreateOp(bool align_corners) {
TF_EXPECT_OK(NodeDefBuilder("resize_area_op", "ResizeArea")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
private:
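// Reference implementation of area resizing; RunRandomTest compares the
// kernel's output against it within a small tolerance.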
void ResizeAreaBaseline(TTypes<float, 4>::ConstTensor input_data,
TTypes<float, 4>::Tensor output_data) {
const int batch_size = input_data.dimension(0);
const int64_t in_height = input_data.dimension(1);
const int64_t in_width = input_data.dimension(2);
const int channels = input_data.dimension(3);
ASSERT_EQ(batch_size, output_data.dimension(0));
ASSERT_EQ(channels, output_data.dimension(3));
const int64_t out_height = output_data.dimension(1);
const int64_t out_width = output_data.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
Tensor sum_tensor(DT_FLOAT, TensorShape({channels}));
typename TTypes<float, 1>::Tensor sum_data = sum_tensor.vec<float>();
float scale = 1.0 / (height_scale * width_scale);
for (int64_t b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float in_y = y * height_scale;
const float in_y1 = (y + 1) * height_scale;
int64_t y_start = std::floor(in_y);
int64_t y_end = std::ceil(in_y1);
for (int64_t x = 0; x < out_width; ++x) {
const float in_x = x * width_scale;
const float in_x1 = (x + 1) * width_scale;
int64_t x_start = std::floor(in_x);
int64_t x_end = std::ceil(in_x1);
sum_data.setConstant(0.0);
for (int64_t i = y_start; i < y_end; ++i) {
float scale_y = i < in_y
? (i + 1 > in_y1 ? height_scale : i + 1 - in_y)
: (i + 1 > in_y1 ? in_y1 - i : 1.0);
for (int64_t j = x_start; j < x_end; ++j) {
float scale_x = j < in_x
? (j + 1 > in_x1 ? width_scale : j + 1 - in_x)
: (j + 1 > in_x1 ? in_x1 - j : 1.0);
for (int64_t c = 0; c < channels; ++c) {
#define BOUND(val, limit) \
std::min(((limit)-int64_t{1}), (std::max(int64_t{0}, (val))))
sum_data(c) +=
static_cast<float>(input_data(b, BOUND(i, in_height),
BOUND(j, in_width), c)) *
scale_y * scale_x * scale;
#undef BOUND
}
}
}
for (int64_t c = 0; c < channels; ++c) {
output_data(b, y, x, c) = sum_data(c);
}
}
}
}
}
protected:
void RunRandomTest(int in_height, int in_width, int target_height,
int target_width, int channels) {
const Tensor* input =
SetRandomImageInput(TensorShape({1, in_height, in_width, channels}));
AddInputFromArray<int32>(TensorShape({2}), {target_height, target_width});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(
new Tensor(device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(),
TensorShape({1, target_height, target_width, channels})));
ResizeAreaBaseline(input->tensor<float, 4>(), expected->tensor<float, 4>());
test::ExpectTensorNear<float>(*expected, *GetOutput(0), 0.00001);
}
void RunManyRandomTests(int channels) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
RunRandomTest(in_h, in_w, target_height, target_width, channels);
}
}
}
}
}
};
TEST_F(ResizeAreaOpTest, TestAreaRandom141x186) {
CreateOp(false);
RunRandomTest(141, 186, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeAreaOpTest, TestAreaRandom183x229) {
CreateOp(false);
RunRandomTest(183, 229, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeAreaOpTest, TestAreaRandom749x603) {
CreateOp(false);
RunRandomTest(749, 603, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeAreaOpTest, TestAreaRandom1x1) {
CreateOp(false);
RunRandomTest(1, 1, 8, 8, 3 /*channels*/);
}
TEST_F(ResizeAreaOpTest, TestAreaRandom1x1AlignCorners) {
CreateOp(true);
RunRandomTest(1, 1, 8, 8, 3 /*channels*/);
}
TEST_F(ResizeAreaOpTest, TestAreaRandomDataSeveralInputsSizes1Channel) {
CreateOp(false);
RunManyRandomTests(1);
}
TEST_F(ResizeAreaOpTest, TestAreaRandomDataSeveralInputsSizes3Channels) {
CreateOp(false);
RunManyRandomTests(3);
}
TEST_F(ResizeAreaOpTest, TestAreaRandomDataSeveralInputsSizes4Channels) {
CreateOp(false);
RunManyRandomTests(4);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_area_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_area_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9800ed6-bc3e-42db-a5d2-71bbee890890 | cpp | tensorflow/tensorflow | resize_bicubic_op | tensorflow/core/kernels/image/resize_bicubic_op.cc | tensorflow/core/kernels/image/resize_bicubic_op_test.cc | #define EIGEN_USE_THREADS
#include <math.h>
#include <algorithm>
#include <array>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
namespace {
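// Cubic interpolation weights are precomputed into a table over kTableSize + 1
// quantized fractional offsets, with two coefficients per entry (the |x| < 1
// and the 1 <= |x| < 2 branches of the cubic kernel), so per-pixel weights
// reduce to table lookups. The Keys kernel (a = -0.5) is used with half-pixel
// centers; the legacy path uses a = -0.75.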
static const int64_t kTableSize = (1 << 10);
const float* InitCoeffsTable(const double a) {
float* coeffs_table = new float[(kTableSize + 1) * 2];
for (int i = 0; i <= kTableSize; ++i) {
float x = i * 1.0 / kTableSize;
coeffs_table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
coeffs_table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
return coeffs_table;
}
const float* GetCoeffsTable(const bool use_keys_cubic) {
if (use_keys_cubic) {
static const float* coeffs_table = InitCoeffsTable(-0.5f);
return coeffs_table;
} else {
static const float* coeffs_table = InitCoeffsTable(-0.75f);
return coeffs_table;
}
}
inline int64_t Bound(int64_t val, int64_t limit) {
return std::min(limit - 1, std::max(int64_t{0}, val));
}
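// Per output coordinate: the four contributing source indices, their cubic
// weights, and 'advance' - how many of the previously cached column values can
// be reused when sweeping in x (filled in via CachedInterpolationCalculator).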
struct WeightsAndIndices {
float weight_0;
float weight_1;
float weight_2;
float weight_3;
int64_t index_0;
int64_t index_1;
int64_t index_2;
int64_t index_3;
int advance;
};
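// Computes the four tap indices and weights for one output coordinate. With
// the Keys kernel (use_keys_cubic) taps that had to be clamped to the border
// get zero weight and the remaining weights are renormalized; the legacy path
// keeps the raw table weights and only clamps the indices.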
template <typename Scaler, bool use_keys_cubic>
inline void GetWeightsAndIndices(const float scale, const int64_t out_loc,
const int64_t limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const int64_t in_loc = std::floor(in_loc_f);
const float delta = in_loc_f - in_loc;
const int64_t offset = lrintf(delta * kTableSize);
const float* coeffs_table = GetCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
out->index_0 = Bound(in_loc - 1, limit);
out->weight_0 =
(out->index_0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->index_1 = Bound(in_loc, limit);
out->weight_1 = (out->index_1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->index_2 = Bound(in_loc + 1, limit);
out->weight_2 =
(out->index_2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->index_3 = Bound(in_loc + 2, limit);
out->weight_3 = (out->index_3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->weight_0 + out->weight_1 + out->weight_2 + out->weight_3;
if (std::abs(weight_sum) >= 1000.0f * std::numeric_limits<float>::min()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->weight_0 *= one_over_weight_sum;
out->weight_1 *= one_over_weight_sum;
out->weight_2 *= one_over_weight_sum;
out->weight_3 *= one_over_weight_sum;
}
} else {
out->weight_0 = coeffs_table[offset * 2 + 1];
out->weight_1 = coeffs_table[offset * 2];
out->weight_2 = coeffs_table[(kTableSize - offset) * 2];
out->weight_3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->index_0 = Bound(in_loc - 1, limit);
out->index_1 = Bound(in_loc, limit);
out->index_2 = Bound(in_loc + 1, limit);
out->index_3 = Bound(in_loc + 2, limit);
}
}
template <typename T>
inline float Interpolate1D(const float weight_0, const float weight_1,
const float weight_2, const float weight_3,
const T value_0, const T value_1, const T value_2,
const T value_3) {
return static_cast<float>(value_0) * weight_0 +
static_cast<float>(value_1) * weight_1 +
static_cast<float>(value_2) * weight_2 +
static_cast<float>(value_3) * weight_3;
}
static float Compute(float values_[4], const float xw_0, const float xw_1,
const float xw_2, const float xw_3) {
return Interpolate1D(xw_0, xw_1, xw_2, xw_3, values_[0], values_[1],
values_[2], values_[3]);
}
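// Remembers the four x indices used for the previous output column so callers
// can tell (via Advance's return value) how many cached per-column
// interpolation values are still valid and can be shifted instead of
// recomputed.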
class CachedInterpolationCalculator {
public:
CachedInterpolationCalculator() : indexes_{-1, -1, -1, -1} {}
inline int Advance(const int64_t x_0, const int64_t x_1, const int64_t x_2,
const int64_t x_3) {
const std::array<int64_t, 4> new_x_indices{{x_0, x_1, x_2, x_3}};
int cached_values_hand = 0;
int new_indices_hand = 0;
while (cached_values_hand < 4) {
if (indexes_[cached_values_hand] == new_x_indices[new_indices_hand]) {
if (new_indices_hand < cached_values_hand) {
indexes_[new_indices_hand] = indexes_[cached_values_hand];
}
cached_values_hand++;
new_indices_hand++;
} else {
cached_values_hand++;
}
}
switch (new_indices_hand) {
case 0:
indexes_[0] = x_0;
TF_FALLTHROUGH_INTENDED;
case 1:
indexes_[1] = x_1;
TF_FALLTHROUGH_INTENDED;
case 2:
indexes_[2] = x_2;
TF_FALLTHROUGH_INTENDED;
case 3:
indexes_[3] = x_3;
break;
}
return new_indices_hand;
}
private:
int64_t indexes_[4];
};
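// Precomputes taps and weights for every output column and scales the indices
// by the channel count so they can be used directly as offsets into a packed
// input row.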
static void ComputeXWeightsAndIndices(const ImageResizerState& resizer_state,
const bool half_pixel_centers,
std::vector<WeightsAndIndices>* x_wais) {
CachedInterpolationCalculator calc;
if (half_pixel_centers) {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
GetWeightsAndIndices<HalfPixelScaler, true>(
resizer_state.width_scale, x, resizer_state.in_width, &(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
} else {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
GetWeightsAndIndices<LegacyScaler, false>(
resizer_state.width_scale, x, resizer_state.in_width, &(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
}
for (int x = 0; x < resizer_state.out_width; ++x) {
(*x_wais)[x].index_0 *= resizer_state.channels;
(*x_wais)[x].index_1 *= resizer_state.channels;
(*x_wais)[x].index_2 *= resizer_state.channels;
(*x_wais)[x].index_3 *= resizer_state.channels;
}
}
static void ComputeGradientXWeightsAndIndices(
const ImageResizerGradientState& resizer_state,
const bool half_pixel_centers, std::vector<WeightsAndIndices>* x_wais) {
CachedInterpolationCalculator calc;
if (half_pixel_centers) {
for (int64_t x = 0; x < resizer_state.resized_width; ++x) {
GetWeightsAndIndices<HalfPixelScaler, true>(resizer_state.width_scale, x,
resizer_state.original_width,
&(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
} else {
for (int64_t x = 0; x < resizer_state.resized_width; ++x) {
GetWeightsAndIndices<LegacyScaler, false>(resizer_state.width_scale, x,
resizer_state.original_width,
&(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
}
}
template <typename T>
static EIGEN_ALWAYS_INLINE float ComputeYInterpolation(
int which, int channel_num, const WeightsAndIndices& y_wai,
const T* y_ptr_0, const T* y_ptr_1, const T* y_ptr_2, const T* y_ptr_3,
const WeightsAndIndices& x_wai) {
int x_index;
switch (which) {
case 0:
x_index = x_wai.index_0;
break;
case 1:
x_index = x_wai.index_1;
break;
case 2:
x_index = x_wai.index_2;
break;
default:
x_index = x_wai.index_3;
break;
}
const int64_t pt_index = x_index + channel_num;
return Interpolate1D<T>(y_wai.weight_0, y_wai.weight_1, y_wai.weight_2,
y_wai.weight_3, y_ptr_0[pt_index], y_ptr_1[pt_index],
y_ptr_2[pt_index], y_ptr_3[pt_index]);
}
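// Forward bicubic resize. Vertical weights are computed once per output row;
// the horizontal sweep then reuses cached column interpolations according to
// x_wai.advance. The 3-channel case keeps its cache in fixed stack arrays, the
// generic case in a per-channel vector.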
template <typename T>
inline void interpolate_with_caching(
const typename TTypes<T, 4>::ConstTensor& input_data,
const ImageResizerState& resizer_state, const bool half_pixel_centers,
typename TTypes<float, 4>::Tensor output_data) {
std::vector<WeightsAndIndices> x_wais(resizer_state.out_width);
ComputeXWeightsAndIndices(resizer_state, half_pixel_centers, &x_wais);
const auto num_channels = resizer_state.channels;
const int64_t in_row_width = resizer_state.in_width * num_channels;
const int64_t in_batch_width = resizer_state.in_height * in_row_width;
const T* input_b_ptr = input_data.data();
float* output_y_ptr = output_data.data();
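// Only the generic path needs this dynamic cache (4 values per channel); the
// 3-channel path below uses fixed-size stack arrays.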
std::vector<float> cached_value(num_channels == 3 ? 0 : 4 * num_channels, 0);
for (int64_t b = 0; b < resizer_state.batch_size;
++b, input_b_ptr += in_batch_width) {
for (int64_t y = 0; y < resizer_state.out_height;
++y, output_y_ptr += resizer_state.out_width * num_channels) {
WeightsAndIndices y_wai;
if (half_pixel_centers) {
GetWeightsAndIndices<HalfPixelScaler, true>(
resizer_state.height_scale, y, resizer_state.in_height, &y_wai);
} else {
GetWeightsAndIndices<LegacyScaler, false>(
resizer_state.height_scale, y, resizer_state.in_height, &y_wai);
}
const T* y_ptr_0 = input_b_ptr + y_wai.index_0 * in_row_width;
const T* y_ptr_1 = input_b_ptr + y_wai.index_1 * in_row_width;
const T* y_ptr_2 = input_b_ptr + y_wai.index_2 * in_row_width;
const T* y_ptr_3 = input_b_ptr + y_wai.index_3 * in_row_width;
if (num_channels == 3) {
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
switch (x_wai.advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
switch (x_wai.advance) {
case 0:
cached_value_0[0] = ComputeYInterpolation(
0, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[0] = ComputeYInterpolation(
0, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[0] = ComputeYInterpolation(
0, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 1:
cached_value_0[1] = ComputeYInterpolation(
1, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[1] = ComputeYInterpolation(
1, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[1] = ComputeYInterpolation(
1, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 2:
cached_value_0[2] = ComputeYInterpolation(
2, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[2] = ComputeYInterpolation(
2, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[2] = ComputeYInterpolation(
2, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 3:
cached_value_0[3] = ComputeYInterpolation(
3, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[3] = ComputeYInterpolation(
3, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[3] = ComputeYInterpolation(
3, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
break;
}
output_y_ptr[x * num_channels + 0] =
Compute(cached_value_0, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
output_y_ptr[x * num_channels + 1] =
Compute(cached_value_1, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
output_y_ptr[x * num_channels + 2] =
Compute(cached_value_2, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
}
} else {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
switch (x_wai.advance) {
case 3:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 1];
cached_value[4 * c + 1] = cached_value[4 * c + 2];
cached_value[4 * c + 2] = cached_value[4 * c + 3];
}
break;
case 2:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 2];
cached_value[4 * c + 1] = cached_value[4 * c + 3];
}
break;
case 1: {
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 3];
}
break;
}
}
switch (x_wai.advance) {
case 0:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = ComputeYInterpolation(
0, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 1:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 1] = ComputeYInterpolation(
1, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 2:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 2] = ComputeYInterpolation(
2, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 3:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 3] = ComputeYInterpolation(
3, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
break;
}
for (int64_t c = 0; c < num_channels; ++c) {
output_y_ptr[x * num_channels + c] =
Compute(&cached_value[4 * c], x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
}
}
}
}
}
}
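// Backward pass: each incoming gradient value is scattered to the 16 input
// pixels (4x4 taps) that produced it, weighted by the same separable bicubic
// weights as the forward pass.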
template <typename T>
inline void ResizeBicubicGrad(typename TTypes<float, 4>::ConstTensor input_grad,
const ImageResizerGradientState& resizer_state,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad) {
const float height_scale = resizer_state.height_scale;
const int64_t original_height = resizer_state.original_height;
const int channels = resizer_state.channels;
const int64_t resized_width = resizer_state.resized_width;
const int64_t resized_height = resizer_state.resized_height;
output_grad.setZero();
std::vector<WeightsAndIndices> x_wais(resizer_state.resized_width);
ComputeGradientXWeightsAndIndices(resizer_state, half_pixel_centers, &x_wais);
for (int64_t b = 0; b < resizer_state.batch_size; ++b) {
for (int64_t y = 0; y < resized_height; ++y) {
WeightsAndIndices y_wai;
if (half_pixel_centers) {
GetWeightsAndIndices<HalfPixelScaler, true>(height_scale, y,
original_height, &y_wai);
} else {
GetWeightsAndIndices<LegacyScaler, false>(height_scale, y,
original_height, &y_wai);
}
for (int64_t x = 0; x < resized_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
for (int64_t c = 0; c < channels; ++c) {
T curr_input_grad = input_grad(b, y, x, c);
output_grad(b, y_wai.index_0, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_0);
output_grad(b, y_wai.index_0, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_1);
output_grad(b, y_wai.index_0, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_2);
output_grad(b, y_wai.index_0, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_3);
output_grad(b, y_wai.index_1, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_0);
output_grad(b, y_wai.index_1, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_1);
output_grad(b, y_wai.index_1, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_2);
output_grad(b, y_wai.index_1, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_3);
output_grad(b, y_wai.index_2, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_0);
output_grad(b, y_wai.index_2, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_1);
output_grad(b, y_wai.index_2, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_2);
output_grad(b, y_wai.index_2, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_3);
output_grad(b, y_wai.index_3, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_0);
output_grad(b, y_wai.index_3, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_1);
output_grad(b, y_wai.index_3, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_2);
output_grad(b, y_wai.index_3, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_3);
}
}
}
}
}
}
typedef Eigen::ThreadPoolDevice CPUDevice;
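// Forward kernel: validates shapes/attrs through ImageResizerState and calls
// interpolate_with_caching to fill the float output.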
template <typename Device, typename T>
class ResizeBicubicOp : public OpKernel {
public:
explicit ResizeBicubicOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
interpolate_with_caching<T>(input_data, st, half_pixel_centers_,
output_data);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
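// Gradient kernel: validates through ImageResizerGradientState and calls
// ResizeBicubicGrad to accumulate the incoming gradient back onto the original
// image coordinates.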
template <typename Device, typename T>
class ResizeBicubicOpGrad : public OpKernel {
public:
explicit ResizeBicubicOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerGradientState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad =
context->input(0).tensor<float, 4>();
typename TTypes<T, 4>::Tensor output_grad(st.output->tensor<T, 4>());
ResizeBicubicGrad<T>(input_grad, st, half_pixel_centers_, output_grad);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBicubic") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBicubicOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBicubicGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
ResizeBicubicOpGrad<CPUDevice, T>);
TF_CALL_float(REGISTER_GRAD_KERNEL);
TF_CALL_double(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class ResizeBicubicOpTest : public OpsTestBase {
protected:
ResizeBicubicOpTest() {
TF_EXPECT_OK(NodeDefBuilder("resize_bicubic_op", "ResizeBicubic")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", false)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
private:
static constexpr int64_t kTableSize = (1 << 10);
const float* InitCoeffsTable() {
float* coeffs_tab = new float[(kTableSize + 1) * 2];
static const double A = -0.75;
for (int i = 0; i <= kTableSize; ++i) {
float x = i * 1.0 / kTableSize;
coeffs_tab[i * 2] = ((A + 2) * x - (A + 3)) * x * x + 1;
x += 1.0;
coeffs_tab[i * 2 + 1] = ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
return coeffs_tab;
}
const float* GetCoeffsTable() {
static const float* coeffs_tab = InitCoeffsTable();
return coeffs_tab;
}
inline int64_t Bound(int64_t val, int64_t limit) {
return std::min(limit - 1, std::max(int64_t{0}, val));
}
inline void GetWeightsAndIndices(float scale, int64_t out_loc, int64_t limit,
std::array<float, 4>* weights,
std::array<int64_t, 4>* indices) {
const int64_t in_loc = scale * out_loc;
const float in_loc_float = scale * out_loc;
const float delta = in_loc_float - in_loc;
const int64_t offset = lrintf(delta * kTableSize);
const float* coeffs_tab = GetCoeffsTable();
*weights = {{coeffs_tab[offset * 2 + 1], coeffs_tab[offset * 2],
coeffs_tab[(kTableSize - offset) * 2],
coeffs_tab[(kTableSize - offset) * 2 + 1]}};
*indices = {{Bound(in_loc - 1, limit), Bound(in_loc, limit),
Bound(in_loc + 1, limit), Bound(in_loc + 2, limit)}};
}
inline float Interpolate1D(const std::array<float, 4>& weights,
const std::array<float, 4>& values) {
return values[0] * weights[0] + values[1] * weights[1] +
values[2] * weights[2] + values[3] * weights[3];
}
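// Reference bicubic implementation mirroring the kernel's legacy
// (non-half-pixel) scaler and coefficient table; RunRandomTest compares the
// kernel output against it within a small tolerance.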
void ResizeBicubicBaseline(TTypes<float, 4>::ConstTensor images,
TTypes<float, 4>::Tensor output) {
const int batch_size = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
ASSERT_EQ(batch_size, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
std::array<float, 4> coeff = {{0.0, 0.0, 0.0, 0.0}};
for (int64_t b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
std::array<float, 4> y_weights;
std::array<int64_t, 4> y_indices;
GetWeightsAndIndices(height_scale, y, in_height, &y_weights,
&y_indices);
for (int64_t x = 0; x < out_width; ++x) {
std::array<float, 4> x_weights;
std::array<int64_t, 4> x_indices;
GetWeightsAndIndices(width_scale, x, in_width, &x_weights,
&x_indices);
for (int64_t c = 0; c < channels; ++c) {
for (int64_t i = 0; i < 4; ++i) {
const std::array<float, 4> values = {
{static_cast<float>(images(b, y_indices[i], x_indices[0], c)),
static_cast<float>(images(b, y_indices[i], x_indices[1], c)),
static_cast<float>(images(b, y_indices[i], x_indices[2], c)),
static_cast<float>(
images(b, y_indices[i], x_indices[3], c))}};
coeff[i] = Interpolate1D(x_weights, values);
}
output(b, y, x, c) = Interpolate1D(y_weights, coeff);
}
}
}
}
}
protected:
void RunRandomTest(const int batch_size, const int64_t in_height,
const int64_t in_width, const int target_height,
const int target_width, int channels) {
LOG(INFO) << "Running random test " << in_height << "x" << in_width << "x"
<< channels << " to " << target_height << "x" << target_width
<< "x" << channels;
const Tensor* input = SetRandomImageInput(
TensorShape({batch_size, in_height, in_width, channels}));
AddInputFromArray<int32>(TensorShape({2}), {target_height, target_width});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(new Tensor(
device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(),
TensorShape({batch_size, target_height, target_width, channels})));
ResizeBicubicBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
test::ExpectTensorNear<float>(*expected, *GetOutput(0), 0.00001);
}
void RunManyRandomTests(int channels) {
for (int batch_size : {1, 2, 5}) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
RunRandomTest(batch_size, in_h, in_w, target_height, target_width,
channels);
}
}
}
}
}
}
};
TEST_F(ResizeBicubicOpTest, TestBicubic2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(ResizeBicubicOpTest, TestBicubic2x2To0x0) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "output dimensions must be positive"))
<< s;
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom141x186) {
RunRandomTest(2, 141, 186, 299, 299, 1 /*channels*/);
RunRandomTest(2, 141, 186, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom183x229) {
RunRandomTest(2, 183, 229, 299, 299, 1 /*channels*/);
RunRandomTest(2, 183, 229, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom749x603) {
RunRandomTest(2, 749, 603, 299, 299, 1 /*channels*/);
RunRandomTest(2, 749, 603, 299, 299, 3 /*channels*/);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes1Channel) {
RunManyRandomTests(1);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes3Channels) {
RunManyRandomTests(3);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes4Channels) {
RunManyRandomTests(4);
}
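// Benchmark helpers: build a graph with a single ResizeBicubic node and run it
// for several batch/size/channel combinations, shrinking by default
// (scale 0.3 x 0.7) or expanding 8x in each dimension.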
static Graph* ResizeBicubic(int batch_size, int size, int channels,
float scale_y = 0.3, float scale_x = 0.7) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, TensorShape({batch_size, size, size, channels}));
input.flat<float>().setRandom();
Tensor shape(DT_INT32, TensorShape({2}));
auto shape_t = shape.flat<int32>();
shape_t(0) = scale_y * size;
shape_t(1) = scale_x * size;
test::graph::Binary(g, "ResizeBicubic", test::graph::Constant(g, input),
test::graph::Constant(g, shape));
return g;
}
#define BM_ResizeBicubicDev(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS( \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS), \
/*old_benchmark_api=*/false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS); \
} \
BENCHMARK(BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS);
BM_ResizeBicubicDev(8, 32, 3);
BM_ResizeBicubicDev(8, 128, 3);
BM_ResizeBicubicDev(8, 512, 3);
BM_ResizeBicubicDev(8, 1024, 3);
BM_ResizeBicubicDev(16, 32, 3);
BM_ResizeBicubicDev(16, 128, 3);
BM_ResizeBicubicDev(16, 512, 3);
BM_ResizeBicubicDev(16, 1024, 3);
BM_ResizeBicubicDev(32, 32, 3);
BM_ResizeBicubicDev(32, 128, 3);
BM_ResizeBicubicDev(32, 512, 3);
BM_ResizeBicubicDev(32, 1024, 3);
#define BM_ResizeBicubicExpand(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS( \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8), \
/*old_benchmark_api=*/false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS * 8 * 8); \
} \
BENCHMARK(BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS);
BM_ResizeBicubicExpand(12, 48, 1);
BM_ResizeBicubicExpand(12, 48, 3);
BM_ResizeBicubicExpand(12, 48, 40);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bicubic_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bicubic_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc72dbe1-77c6-4f82-b5f5-b5b2a1d20887 | cpp | tensorflow/tensorflow | mirror_pad_op | tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc | tensorflow/core/kernels/image/mirror_pad_op_test.cc | #include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/mirror_pad_mode.h"
namespace tensorflow {
namespace {
class MirrorPadOp : public XlaOpKernel {
public:
explicit MirrorPadOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
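// Builds the mirror-padded result one dimension at a time: the accumulated
// tensor is reversed along the dimension and slices of the reversed tensor are
// concatenated on its left and right. REFLECT mode excludes the border element
// from the mirrored slice; SYMMETRIC includes it.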
absl::StatusOr<xla::XlaOp> DoMirrorPad(const xla::XlaOp t,
const xla::Shape& original_shape,
const xla::LiteralSlice& pad_literal,
const MirrorPadMode mode,
xla::XlaBuilder* b) {
int64_t excluded_edges = mode == MirrorPadMode::REFLECT ? 1 : 0;
xla::XlaOp accum = t;
for (int64_t dimno = original_shape.rank() - 1; dimno >= 0; --dimno) {
auto t_rev = xla::Rev(accum, {dimno});
int64_t lhs_padding = pad_literal.Get<int64_t>({dimno, 0});
int64_t rhs_padding = pad_literal.Get<int64_t>({dimno, 1});
int64_t dim_size = original_shape.dimensions(dimno);
TF_RET_CHECK(lhs_padding >= 0 &&
lhs_padding <= dim_size - excluded_edges);
TF_RET_CHECK(rhs_padding >= 0 &&
rhs_padding <= dim_size - excluded_edges);
auto lhs_pad =
xla::SliceInDim(t_rev, dim_size - excluded_edges - lhs_padding,
dim_size - excluded_edges, 1, dimno);
auto rhs_pad = xla::SliceInDim(t_rev, excluded_edges,
excluded_edges + rhs_padding, 1, dimno);
accum = xla::ConcatInDim(b, {lhs_pad, accum, rhs_pad}, dimno);
}
return accum;
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape("input");
const TensorShape pad_shape = ctx->InputShape("paddings");
MirrorPadMode mode;
OP_REQUIRES_OK(ctx, GetNodeAttr(def(), "mode", &mode));
OP_REQUIRES(
ctx, mode == MirrorPadMode::REFLECT || mode == MirrorPadMode::SYMMETRIC,
xla::Unimplemented("Unsupported MirrorPad mode. Only SYMMETRIC and "
"REFLECT modes are currently supported"));
const int dims = input_shape.dims();
OP_REQUIRES(
ctx,
TensorShapeUtils::IsMatrix(pad_shape) && pad_shape.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
pad_shape.DebugString()));
OP_REQUIRES(
ctx, dims == pad_shape.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
pad_shape.DebugString(), " ", input_shape.DebugString()));
xla::Literal pad_literal;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsInt64Literal("paddings", &pad_literal));
xla::XlaBuilder* b = ctx->builder();
auto in0 = ctx->Input("input");
absl::StatusOr<xla::Shape> in0_shape = b->GetShape(in0);
OP_REQUIRES(ctx, in0_shape.ok(), in0_shape.status());
absl::StatusOr<xla::XlaOp> accum_status =
DoMirrorPad(in0, in0_shape.value(), pad_literal, mode, b);
OP_REQUIRES_OK(ctx, accum_status.status());
ctx->SetOutput(0, accum_status.value());
}
private:
MirrorPadOp(const MirrorPadOp&) = delete;
void operator=(const MirrorPadOp&) = delete;
};
REGISTER_XLA_OP(Name("MirrorPad").CompileTimeConstantInput("paddings"),
MirrorPadOp);
class MirrorPadGradOp : public XlaOpKernel {
public:
explicit MirrorPadGradOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
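// Gradient of MirrorPad: the gradient over each padded edge is sliced off,
// reversed, zero-padded back to the unpadded size, and added to the gradient
// of the core region, one dimension at a time.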
absl::StatusOr<xla::XlaOp> DoMirrorPadGrad(
const xla::XlaOp t, const xla::Shape& original_shape,
const xla::LiteralSlice& pad_literal, const MirrorPadMode mode,
xla::XlaBuilder* b) {
int64_t excluded_edges = mode == MirrorPadMode::REFLECT ? 1 : 0;
xla::XlaOp grad = t;
for (int64_t dimno = original_shape.rank() - 1; dimno >= 0; --dimno) {
int64_t lhs_padding = pad_literal.Get<int64_t>({dimno, 0});
int64_t rhs_padding = pad_literal.Get<int64_t>({dimno, 1});
int64_t dim_size = original_shape.dimensions(dimno);
int64_t result_dim_size = dim_size - lhs_padding - rhs_padding;
TF_RET_CHECK(lhs_padding >= 0 &&
lhs_padding <= dim_size - excluded_edges);
TF_RET_CHECK(rhs_padding >= 0 &&
rhs_padding <= dim_size - excluded_edges);
xla::XlaOp lhs_pad = xla::SliceInDim(grad, 0, lhs_padding, 1, dimno);
xla::XlaOp reverse_lhs_pad = xla::Rev(lhs_pad, {dimno});
xla::XlaOp padded_lhs_pad = xla::PadInDim(
reverse_lhs_pad, xla::ScalarLike(reverse_lhs_pad, 0), dimno,
/*pad_lo=*/excluded_edges,
/*pad_hi=*/result_dim_size - lhs_padding - excluded_edges);
xla::XlaOp rhs_pad =
xla::SliceInDim(grad, dim_size - rhs_padding, dim_size, 1, dimno);
xla::XlaOp reverse_rhs_pad = xla::Rev(rhs_pad, {dimno});
xla::XlaOp padded_rhs_pad = xla::PadInDim(
reverse_rhs_pad, xla::ScalarLike(reverse_rhs_pad, 0), dimno,
/*pad_lo=*/result_dim_size - rhs_padding - excluded_edges,
/*pad_hi=*/excluded_edges);
xla::XlaOp grad_core =
xla::SliceInDim(grad, lhs_padding, dim_size - rhs_padding, 1, dimno);
grad = padded_lhs_pad + grad_core + padded_rhs_pad;
}
return grad;
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape("input");
const TensorShape pad_shape = ctx->InputShape("paddings");
MirrorPadMode mode;
OP_REQUIRES_OK(ctx, GetNodeAttr(def(), "mode", &mode));
OP_REQUIRES(
ctx, mode == MirrorPadMode::REFLECT || mode == MirrorPadMode::SYMMETRIC,
xla::Unimplemented("Unsupported MirrorPadGrad mode. Only SYMMETRIC and "
"REFLECT modes are currently supported"));
const int dims = input_shape.dims();
OP_REQUIRES(
ctx,
TensorShapeUtils::IsMatrix(pad_shape) && pad_shape.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
pad_shape.DebugString()));
OP_REQUIRES(
ctx, dims == pad_shape.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
pad_shape.DebugString(), " ", input_shape.DebugString()));
xla::Literal pad_literal;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsInt64Literal("paddings", &pad_literal));
xla::XlaBuilder* b = ctx->builder();
auto in0 = ctx->Input("input");
absl::StatusOr<xla::Shape> in0_shape = b->GetShape(in0);
OP_REQUIRES(ctx, in0_shape.ok(), in0_shape.status());
absl::StatusOr<xla::XlaOp> accum_status =
DoMirrorPadGrad(in0, in0_shape.value(), pad_literal, mode, b);
OP_REQUIRES_OK(ctx, accum_status.status());
ctx->SetOutput(0, accum_status.value());
}
private:
MirrorPadGradOp(const MirrorPadGradOp&) = delete;
void operator=(const MirrorPadGradOp&) = delete;
};
REGISTER_XLA_OP(Name("MirrorPadGrad").CompileTimeConstantInput("paddings"),
MirrorPadGradOp);
}
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class MirrorPadOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_op", "MirrorPad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
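// Instantiates a REFLECT and a SYMMETRIC forward-pad test for each element
// type listed below.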
#define REGISTER_TEST(T) \
TEST_F(MirrorPadOpTest, TestMirrorPadReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInputFromArray<T>(TensorShape({1, 2, 3, 1}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 4, 7, 1})); \
test::FillValues<T>(&expected, \
{6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1, \
6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInputFromArray<T>(TensorShape({1, 2, 1, 3}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({3, 2, 1, 7})); \
test::FillValues<T>( \
&expected, \
{2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, \
5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(quint8)
REGISTER_TEST(qint8)
REGISTER_TEST(qint32)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
TEST_F(MirrorPadOpTest, TestMirrorPadReflectLargeInput) {
MakeOp<float>("REFLECT");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 2 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetricLargeInput) {
MakeOp<float>("SYMMETRIC");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i - 1;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 1 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class MirrorPadGradOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_grad_op", "MirrorPadGrad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
#define REGISTER_TEST(T) \
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInput<T>(TensorShape({1, 4, 7, 1}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 3, 1})); \
test::FillValues<T>(&expected, {16, 18, 8, 16, 18, 8}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInput<T>(TensorShape({3, 2, 1, 7}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 1, 3})); \
test::FillValues<T>(&expected, {9, 27, 27, 9, 27, 27}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/mirror_pad_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30eb79fb-bcbc-4b84-9ad4-023e2479af8c | cpp | tensorflow/tensorflow | mkl_dequantize_op | tensorflow/core/kernels/mkl/mkl_dequantize_op.cc | tensorflow/core/kernels/mkl/mkl_dequantize_op_test.cc | #if defined(INTEL_MKL)
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/kernels/meta_support.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
using dnnl::primitive_attr;
using dnnl::stream;
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
template <typename Device, typename T, typename U, bool native_format = false>
class MklDequantizeOp : public OpKernel {
public:
explicit MklDequantizeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
string mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string));
OP_REQUIRES(ctx, mode_string == "SCALED",
errors::InvalidArgument(
"MklDequantizeOp only supports 'SCALED' mode, but got '" +
mode_string + "'"));
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES(
ctx,
(ctx->output_type(0) == DT_FLOAT || ctx->output_type(0) == DT_BFLOAT16),
errors::InvalidArgument("Output type must be float or bfloat16,"
" is '" +
DataTypeString(ctx->output_type(0)) + "'"));
}
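// Dequantization is implemented as a oneDNN reorder from the quantized type T
// to float/bfloat16, with an output scale derived from the requested
// [min_range, max_range] and the representable range of T.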
void Compute(OpKernelContext* ctx) override {
try {
auto cpu_engine = engine(engine::kind::cpu, 0);
const Tensor& src_tensor = ctx->input(kSrcIndex);
const float min_range = ctx->input(kMinIndex).template scalar<float>()();
const float max_range = ctx->input(kMaxIndex).template scalar<float>()();
auto src_tf_shape = src_tensor.shape();
auto src_dims = TFShapeToMklDnnDims(src_tensor.shape());
auto output_dims = src_dims;
MklDnnData<T> src(&cpu_engine);
MklDnnData<U> dst(&cpu_engine);
std::shared_ptr<stream> reorder_stream;
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(ctx);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
reorder_stream.reset(CreateStream(&eigen_tp, cpu_engine));
memory::format_tag dst_layout_type;
switch (src_tf_shape.dims()) {
case 1:
dst_layout_type = memory::format_tag::x;
break;
case 2:
dst_layout_type = memory::format_tag::nc;
break;
case 3:
dst_layout_type = memory::format_tag::tnc;
break;
case 4:
dst_layout_type = memory::format_tag::nhwc;
break;
case 5:
dst_layout_type = memory::format_tag::ndhwc;
break;
default:
OP_REQUIRES_OK(
ctx, errors::InvalidArgument("Input dims must be <= 5 and >= 1"));
return;
}
auto src_md = memory::desc(src_dims, MklDnnType<T>(), dst_layout_type);
src.SetUsrMem(src_md, &src_tensor);
src.SetUsrMemDataHandle(&src_tensor, reorder_stream);
Tensor* output_tensor = nullptr;
MklDnnShape output_mkl_shape;
TensorShape output_tf_shape;
memory::desc dst_md =
memory::desc(src_dims, MklDnnType<U>(), dst_layout_type);
output_mkl_shape.SetMklTensor(false);
output_tf_shape = MklDnnDimsToTFShape(output_dims);
AllocateOutputSetMklShape(ctx, 0, &output_tensor, output_tf_shape,
output_mkl_shape, native_format);
dst.SetUsrMem(dst_md, output_tensor);
dst.SetUsrMemDataHandle(output_tensor, reorder_stream);
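// Derive the SCALED-mode scale factor that maps the quantized type's
// representable range (optionally narrowed) onto [min_range, max_range].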
static constexpr int num_bits = sizeof(T) * 8;
bool is_signed = std::is_signed<T>::value;
const int target_bits = is_signed ? (num_bits - 1) : num_bits;
const float v_max = static_cast<float>(uint64_t{1} << target_bits) - 1;
float v_min = 0;
if (is_signed) {
v_min = -(static_cast<float>(uint64_t{1} << target_bits));
}
if (narrow_range_) {
v_min += 1;
}
float scale_factor;
if (v_min != 0) {
scale_factor = std::max(min_range / v_min, max_range / v_max);
} else {
scale_factor = max_range / v_max;
}
std::vector<float> scales = {scale_factor};
primitive_attr attr;
#ifndef ENABLE_ONEDNN_V3
attr.set_output_scales(0, scales);
#else
attr.set_scales_mask(DNNL_ARG_SRC, 0);
auto scale_mem = memory({{static_cast<int64_t>(scales.size())},
MklDnnType<float>(),
memory::format_tag::x},
cpu_engine, scales.data());
#endif
auto reorder_pd =
ReorderPd(cpu_engine, src.GetUsrMem()->get_desc(), cpu_engine,
dst.GetUsrMem()->get_desc(), attr);
std::vector<primitive> net = {reorder(reorder_pd)};
std::vector<std::unordered_map<int, memory>> reorder_net_args;
#ifndef ENABLE_ONEDNN_V3
reorder_net_args.push_back({{DNNL_ARG_FROM, *src.GetUsrMem()},
{DNNL_ARG_TO, *dst.GetUsrMem()}});
#else
reorder_net_args.push_back(
{{DNNL_ARG_FROM, *src.GetUsrMem()},
{DNNL_ARG_TO, *dst.GetUsrMem()},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, scale_mem}});
#endif
execute_primitives(net, reorder_stream, reorder_net_args);
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
ctx, errors::Aborted("Operation received an exception:", error_msg));
}
}
private:
const size_t kSrcIndex = 0;
const size_t kMinIndex = 1;
const size_t kMaxIndex = 2;
bool narrow_range_;
};
REGISTER_KERNEL_BUILDER(Name("_MklDequantize")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("T")
.TypeConstraint<float>("dtype")
.Label(mkl_op_registry::kMklQuantizedOpLabel),
MklDequantizeOp<CPUDevice, quint8, float, true>);
REGISTER_KERNEL_BUILDER(Name("_MklDequantize")
.Device(DEVICE_CPU)
.TypeConstraint<qint8>("T")
.TypeConstraint<float>("dtype")
.Label(mkl_op_registry::kMklQuantizedOpLabel),
MklDequantizeOp<CPUDevice, qint8, float, true>);
REGISTER_KERNEL_BUILDER(Name("_MklDequantize")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("T")
.TypeConstraint<bfloat16>("dtype")
.Label(mkl_op_registry::kMklQuantizedOpLabel),
MklDequantizeOp<CPUDevice, quint8, bfloat16, true>);
REGISTER_KERNEL_BUILDER(Name("_MklDequantize")
.Device(DEVICE_CPU)
.TypeConstraint<qint8>("T")
.TypeConstraint<bfloat16>("dtype")
.Label(mkl_op_registry::kMklQuantizedOpLabel),
MklDequantizeOp<CPUDevice, qint8, bfloat16, true>);
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_MKL)
#define EIGEN_USE_THREADS
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
class MklDequantizeOpTest : public OpsTestBase {
protected:
template <typename Tinput, typename Toutput>
void RunMklDequantize(const Tensor& input_quantized,
const Tensor& min_range_float,
const Tensor& max_range_float,
const Tensor& expected_output) {
AddInputFromArray<Tinput>(input_quantized.shape(),
input_quantized.flat<Tinput>());
AddInputFromArray<float>(min_range_float.shape(),
min_range_float.flat<float>());
AddInputFromArray<float>(max_range_float.shape(),
max_range_float.flat<float>());
TF_ASSERT_OK(RunOpKernel());
const Tensor& actual_output = *GetOutput(0);
test::ExpectTensorNear<Toutput>(expected_output, actual_output, 0.1);
}
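// Quantizes a small float tensor, runs _MklDequantize on it, and checks the
// result against the reference Dequantize op (via
// MklTestingUtil::RunDequantizeOp) for both float and bfloat16 outputs.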
template <typename Tinput, typename Toutput>
void TestMklDequantize() {
const DataType input_dt = DataTypeToEnum<Tinput>::v();
const DataType output_dt = DataTypeToEnum<Toutput>::v();
TF_ASSERT_OK(NodeDefBuilder("dequantize_op", "_MklDequantize")
.Input(FakeInput(input_dt))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", input_dt)
.Attr("dtype", output_dt)
.Attr("mode", "SCALED")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
Tensor input_float(DT_FLOAT, {1, 2, 2, 2});
test::FillValues<float>(&input_float, {0, 10, 50, 40, 25, 115, 190, 255});
const float min_range = 0.0f;
const float max_range = 255.0f;
Tensor min_range_float(DT_FLOAT, {});
test::FillValues<float>(&min_range_float, {min_range});
Tensor max_range_float(DT_FLOAT, {});
test::FillValues<float>(&max_range_float, {max_range});
Tensor input_quantized =
FloatTensorToQuantized<Tinput>(input_float, min_range, max_range);
Tensor expected_output_float32;
MklTestingUtil::RunDequantizeOp(input_quantized, min_range_float,
max_range_float, "SCALED",
&expected_output_float32);
if (output_dt == DT_BFLOAT16) {
Tensor expected_output_bfloat16(DT_BFLOAT16, {1, 2, 2, 2});
expected_output_bfloat16.flat<bfloat16>() =
expected_output_float32.flat<float>().cast<bfloat16>();
RunMklDequantize<Tinput, Toutput>(input_quantized, min_range_float,
max_range_float,
expected_output_bfloat16);
} else {
RunMklDequantize<Tinput, Toutput>(input_quantized, min_range_float,
max_range_float,
expected_output_float32);
}
}
};
TEST_F(MklDequantizeOpTest, MklDequantize_Unsigned_Input_Float_Output) {
TestMklDequantize<quint8, float>();
}
TEST_F(MklDequantizeOpTest, MklDequantize_Signed_Input_Float_Output) {
TestMklDequantize<qint8, float>();
}
TEST_F(MklDequantizeOpTest, MklDequantize_Unsigned_Input_Bfloat16_Output) {
TestMklDequantize<quint8, bfloat16>();
}
TEST_F(MklDequantizeOpTest, MklDequantize_Signed_Input_Bfloat16_Output) {
TestMklDequantize<qint8, bfloat16>();
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_dequantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_dequantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
145e02f9-e296-4fae-91d2-7cfe3e2ddd1e | cpp | tensorflow/tensorflow | mkl_conv_ops | tensorflow/core/kernels/mkl/mkl_conv_ops.cc | tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc | #ifdef INTEL_MKL
#include "tensorflow/core/kernels/mkl/mkl_conv_ops.h"
#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include "absl/strings/str_join.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h"
#include "tensorflow/core/kernels/no_op.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::convolution_forward;
using dnnl::prop_kind;
using dnnl::stream;
using ConvFwdPd = dnnl::convolution_forward::primitive_desc;
using ReorderPd = dnnl::reorder::primitive_desc;
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
scales_mask, scales) \
append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding, scales_mask, \
scales)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
append_eltwise(scale, alg, alpha, beta)
#define GET_DATA_TYPE data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
set_fuse_activation(true, dnnl::algorithm::eltwise_bounded_relu, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#define OUTPUT_SCALE_DCHECK (post_op_param.name == "output_scale")
#define TSCALED_BIAS Tbias
#define SCALE scales
#define SUMMAND_SCALE_U8(summand_range, output_range) \
summand_range / output_range
#define SUMMAND_SCALE_S8(summand_range, output_range) \
255.0f * summand_range / (output_range * 127.0f)
#else
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
scales_mask, scales) \
append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
append_eltwise(alg, alpha, beta); \
(void)scale
#define GET_DATA_TYPE get_data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
set_fuse_activation(true, dnnl::algorithm::eltwise_clip, 0.0, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#define OUTPUT_SCALE_DCHECK \
(post_op_param.name == "src_scale") || \
(post_op_param.name == "wei_scale") || \
(post_op_param.name == "dst_scale")
#define TSCALED_BIAS float
#define SCALE wei_scale
#define SUMMAND_SCALE_U8(summand_range, output_range) summand_range / 255.0f
#define SUMMAND_SCALE_S8(summand_range, output_range) summand_range / 127.0f
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
#define FWD_STREAM , *fwd_stream
#else
#define FWD_STREAM
#endif
namespace quantized_fusions {
string none[] = {""};
string bias[] = {"BiasAdd"};
string relu[] = {"Relu"};
string requantize[] = {"Requantize"};
string bias_relu[] = {"BiasAdd", "Relu"};
string bias_requantize[] = {"BiasAdd", "Requantize"};
string relu_requantize[] = {"Relu", "Requantize"};
string bias_relu_requantize[] = {"BiasAdd", "Relu", "Requantize"};
string bias_sum_relu[] = {"BiasAdd", "Sum", "Relu"};
string bias_sum_relu_requantize[] = {"BiasAdd", "Sum", "Relu", "Requantize"};
}
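// Parameters that describe a forward convolution. The primitive factory also
// serializes these fields to build the cache key used for primitive reuse.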
struct MklConvFwdParams {
memory::dims src_dims;
memory::dims filter_dims;
memory::dims bias_dims;
memory::dims dst_dims;
memory::dims strides;
memory::dims dilations;
memory::dims padding_left;
memory::dims padding_right;
memory::dims fuse_bn_dims;
MklTensorFormat tf_fmt;
bool native_format;
bool is_depthwise;
bool is_filter_const = false;
string dtypes = string("");
struct PostOpParam {
string name;
dnnl::algorithm alg;
std::vector<float> param;
std::string partial_key;
DataType dtype = DT_INVALID;
};
std::vector<PostOpParam> post_op_params;
MklConvFwdParams(memory::dims src_dims, memory::dims filter_dims,
memory::dims bias_dims, memory::dims dst_dims,
memory::dims strides, memory::dims dilations,
memory::dims padding_left, memory::dims padding_right,
memory::dims fuse_bn_dims, MklTensorFormat tf_fmt,
bool native_format, bool is_depthwise, bool is_filter_const)
: src_dims(src_dims),
filter_dims(filter_dims),
bias_dims(bias_dims),
dst_dims(dst_dims),
strides(strides),
dilations(dilations),
padding_left(padding_left),
padding_right(padding_right),
fuse_bn_dims(fuse_bn_dims),
tf_fmt(tf_fmt),
native_format(native_format),
is_depthwise(is_depthwise),
is_filter_const(is_filter_const) {}
};
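// Wraps a oneDNN convolution_forward primitive together with its primitive
// descriptor and memory objects, so a configured convolution can be executed
// repeatedly on different data handles.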
template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitive : public MklPrimitive {
public:
explicit MklConvFwdPrimitive(const MklConvFwdParams& convFwdDims)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.conv_fwd == nullptr) {
Setup(convFwdDims);
}
}
~MklConvFwdPrimitive() {}
dnnl::memory::desc GetScratchPadDesc() {
return context_.fwd_pd->scratchpad_desc();
}
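  // Convolution forward execute with bias (no fused-BatchNorm tensors).
  //   src_data:    input data buffer for src
  //   filter_data: input data buffer for filter (weights)
  //   bias_data:   input data buffer for bias
  //   dst_data:    output data buffer for dst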
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const void* bias_data, const Toutput* dst_data,
const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data = nullptr) {
Execute(src_data, filter_data, bias_data, dst_data, nullptr, nullptr,
nullptr, nullptr, convFwdDims, fwd_stream, sp_data);
}
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const void* bias_data, const Toutput* dst_data,
const Tinput* bn_scale_data, const Tinput* bn_mean_data,
const Tinput* bn_offset_data, const Tinput* bn_rsqrt_data,
const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(src_data)) FWD_STREAM);
context_.filter_mem->set_data_handle(
static_cast<void*>(const_cast<Tfilter*>(filter_data)) FWD_STREAM);
if (bias_data != nullptr) {
context_.bias_mem->set_data_handle(const_cast<void*>(bias_data)
FWD_STREAM);
}
auto const& post_op_params = convFwdDims.post_op_params;
if (!post_op_params.empty()) {
for (auto const& post_op_param : post_op_params) {
if (post_op_param.name == "src_scale") {
context_.src_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
} else if (post_op_param.name == "wei_scale") {
context_.wei_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
} else if (post_op_param.name == "dst_scale") {
context_.dst_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
}
}
}
if (bn_scale_data != nullptr) {
context_.bn_scale_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_scale_data)) FWD_STREAM);
context_.bn_mean_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_mean_data)) FWD_STREAM);
context_.bn_rsqrt_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_rsqrt_data)) FWD_STREAM);
context_.bn_offset_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_offset_data)) FWD_STREAM);
}
context_.dst_mem->set_data_handle(
static_cast<void*>(const_cast<Toutput*>(dst_data)) FWD_STREAM);
if (sp_data) {
context_.sp_mem->set_data_handle(static_cast<void*>(sp_data) FWD_STREAM);
}
DCHECK_EQ(context_.fwd_primitives.size(),
context_.fwd_primitives_args.size());
for (size_t i = 0; i < context_.fwd_primitives.size(); ++i) {
context_.fwd_primitives.at(i).execute(*fwd_stream,
context_.fwd_primitives_args.at(i));
}
context_.src_mem->set_data_handle(DummyData);
context_.filter_mem->set_data_handle(DummyData);
if (bias_data != nullptr) {
context_.bias_mem->set_data_handle(DummyData);
}
if (bn_scale_data != nullptr) {
context_.bn_scale_mem->set_data_handle(DummyData);
context_.bn_mean_mem->set_data_handle(DummyData);
context_.bn_rsqrt_mem->set_data_handle(DummyData);
context_.bn_offset_mem->set_data_handle(DummyData);
}
context_.dst_mem->set_data_handle(DummyData);
if (sp_data) {
context_.sp_mem->set_data_handle(DummyData);
}
}
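  // Convolution forward execute without bias.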
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const Toutput* dst_data, const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data) {
Execute(src_data, filter_data, nullptr, dst_data, nullptr, nullptr, nullptr,
nullptr, convFwdDims, fwd_stream, sp_data);
}
std::shared_ptr<ConvFwdPd> GetPrimitiveDesc() const {
return context_.fwd_pd;
}
private:
struct ConvFwdContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> filter_mem;
std::shared_ptr<dnnl::memory> bias_mem;
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<dnnl::memory> sp_mem;
std::shared_ptr<dnnl::memory> bn_scale_mem;
std::shared_ptr<dnnl::memory> bn_mean_mem;
std::shared_ptr<dnnl::memory> bn_rsqrt_mem;
std::shared_ptr<dnnl::memory> bn_offset_mem;
std::shared_ptr<dnnl::memory> src_scale_mem;
std::shared_ptr<dnnl::memory> wei_scale_mem;
std::shared_ptr<dnnl::memory> dst_scale_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::convolution_forward::desc> fwd_desc;
#endif
std::shared_ptr<ConvFwdPd> fwd_pd;
std::shared_ptr<dnnl::memory::desc> src_md;
std::shared_ptr<dnnl::memory::desc> filter_md;
std::shared_ptr<dnnl::memory::desc> bias_md;
std::shared_ptr<dnnl::memory::desc> dst_md;
std::shared_ptr<dnnl::memory::desc> bn_scale_md;
std::shared_ptr<dnnl::memory::desc> bn_mean_md;
std::shared_ptr<dnnl::memory::desc> bn_rsqrt_md;
std::shared_ptr<dnnl::memory::desc> bn_offset_md;
std::shared_ptr<dnnl::memory::desc> src_scale_md;
std::shared_ptr<dnnl::memory::desc> wei_scale_md;
std::shared_ptr<dnnl::memory::desc> dst_scale_md;
std::shared_ptr<dnnl::primitive> conv_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> fwd_primitives_args;
ConvFwdContext()
: src_mem(nullptr),
filter_mem(nullptr),
bias_mem(nullptr),
dst_mem(nullptr),
sp_mem(nullptr),
bn_scale_mem(nullptr),
bn_mean_mem(nullptr),
bn_rsqrt_mem(nullptr),
bn_offset_mem(nullptr),
src_scale_mem(nullptr),
wei_scale_mem(nullptr),
dst_scale_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
fwd_desc(nullptr),
#endif
fwd_pd(nullptr),
src_md(nullptr),
filter_md(nullptr),
bias_md(nullptr),
dst_md(nullptr),
bn_scale_md(nullptr),
bn_mean_md(nullptr),
bn_rsqrt_md(nullptr),
bn_offset_md(nullptr),
src_scale_md(nullptr),
wei_scale_md(nullptr),
dst_scale_md(nullptr),
conv_fwd(nullptr) {
}
};
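  // Creates the memory descriptors, post-op attributes, primitive descriptor
  // and the convolution primitive for the dimensions in `convFwdDims`.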
void Setup(const MklConvFwdParams& convFwdDims) {
memory::format_tag user_data_fmt;
if (convFwdDims.native_format) {
user_data_fmt = MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
} else {
user_data_fmt = memory::format_tag::any;
}
context_.src_md.reset(new memory::desc(
{convFwdDims.src_dims}, MklDnnType<Tinput>(), user_data_fmt));
if (convFwdDims.filter_dims.size() == 4 && !convFwdDims.is_filter_const &&
std::is_same<Tfilter, float>::value &&
convFwdDims.src_dims[MklDnnDims::Dim_N] == 1) {
context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
MklDnnType<Tfilter>(),
memory::format_tag::hwio));
} else {
context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
MklDnnType<Tfilter>(),
memory::format_tag::any));
}
context_.dst_md.reset(new memory::desc(
{convFwdDims.dst_dims}, MklDnnType<Toutput>(), user_data_fmt));
if (!convFwdDims.bias_dims.empty()) {
if (std::is_same<Tbias, qint32>::value) {
context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
MklDnnType<TSCALED_BIAS>(),
memory::format_tag::any));
} else {
context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
MklDnnType<Tbias>(),
memory::format_tag::any));
}
#ifndef ENABLE_ONEDNN_V3
context_.fwd_desc.reset(new convolution_forward::desc(
prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.bias_md,
*context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
convFwdDims.padding_left, convFwdDims.padding_right));
} else {
context_.fwd_desc.reset(new convolution_forward::desc(
prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.dst_md,
convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,
convFwdDims.padding_right));
#endif
}
if (!convFwdDims.fuse_bn_dims.empty()) {
const memory::format_tag fused_bn_arg_fmt =
convFwdDims.native_format
? user_data_fmt
: MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
context_.bn_scale_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_mean_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_rsqrt_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_offset_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
}
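    // Translate the requested post-ops (activation, sum, quantization scales
    // and fused BatchNorm binary ops) into oneDNN primitive attributes.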
auto const& post_op_params = convFwdDims.post_op_params;
dnnl::primitive_attr post_ops_attr;
dnnl::post_ops post_ops;
post_ops_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
std::unordered_map<string, bool> is_scale_set;
if (!post_op_params.empty()) {
for (auto const& post_op_param : post_op_params) {
if (post_op_param.name == "activation") {
DCHECK_EQ(post_op_param.param.size(), 3);
float op_scale = post_op_param.param[0];
float op_alpha = post_op_param.param[1];
float op_beta = post_op_param.param[2];
post_ops.APPEND_ELTWISE(op_scale, post_op_param.alg, op_alpha,
op_beta);
} else if (post_op_param.name == "sum") {
DCHECK_EQ(post_op_param.param.size(), 1);
float op_scale = post_op_param.param[0];
#ifndef ENABLE_ONEDNN_V3
post_ops.append_sum(op_scale);
#else
if (post_op_param.dtype != DT_INVALID) {
if (post_op_param.dtype == DT_FLOAT) {
post_ops.append_sum(op_scale, 0,
MklDnnType<float>());
} else {
TF_CHECK_OK(absl::FailedPreconditionError(
"Summand data type is expected to be float"));
}
} else {
post_ops.append_sum(op_scale);
}
#endif
#ifndef ENABLE_ONEDNN_V3
} else if (post_op_param.name == "output_scale") {
if (post_op_param.param.size() == 1) {
post_ops_attr.set_output_scales(0, post_op_param.param);
} else {
post_ops_attr.set_output_scales(2, post_op_param.param);
}
#else
} else if (post_op_param.name == "src_scale") {
is_scale_set.insert({"src", true});
post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0);
context_.src_scale_md.reset(new memory::desc({1}, MklDnnType<float>(),
memory::format_tag::x));
context_.src_scale_mem.reset(
new memory(*context_.src_scale_md, cpu_engine_, DummyData));
} else if (post_op_param.name == "wei_scale") {
is_scale_set.insert({"wei", true});
const int scale_size = post_op_param.param.size();
const int mask = scale_size == 1 ? 0
: convFwdDims.is_depthwise ? 3
: 1;
post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask);
context_.wei_scale_md.reset(new memory::desc(
{scale_size}, MklDnnType<float>(), memory::format_tag::x));
context_.wei_scale_mem.reset(
new memory(*context_.wei_scale_md, cpu_engine_, DummyData));
} else if (post_op_param.name == "dst_scale") {
is_scale_set.insert({"dst", true});
post_ops_attr.set_scales_mask(DNNL_ARG_DST, 0);
context_.dst_scale_md.reset(new memory::desc({1}, MklDnnType<float>(),
memory::format_tag::x));
context_.dst_scale_mem.reset(
new memory(*context_.dst_scale_md, cpu_engine_, DummyData));
#endif
} else if (post_op_param.name == "fuse_bn") {
post_ops.append_binary(dnnl::algorithm::binary_sub,
*context_.bn_mean_md);
post_ops.append_binary(dnnl::algorithm::binary_mul,
*context_.bn_rsqrt_md);
post_ops.append_binary(dnnl::algorithm::binary_mul,
*context_.bn_scale_md);
post_ops.append_binary(dnnl::algorithm::binary_add,
*context_.bn_offset_md);
} else {
DCHECK((post_op_param.name == "activation") ||
(post_op_param.name == "sum") || OUTPUT_SCALE_DCHECK ||
(post_op_param.name == "fuse_bn"));
}
}
post_ops_attr.set_post_ops(post_ops);
}
#ifndef ENABLE_ONEDNN_V3
context_.fwd_pd.reset(
new ConvFwdPd(*context_.fwd_desc, post_ops_attr, cpu_engine_));
#else
if (!convFwdDims.bias_dims.empty()) {
context_.fwd_pd.reset(new ConvFwdPd(
cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.bias_md,
*context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
convFwdDims.padding_left, convFwdDims.padding_right, post_ops_attr));
} else {
context_.fwd_pd.reset(new ConvFwdPd(
cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.dst_md,
convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,
convFwdDims.padding_right, post_ops_attr));
}
#endif
context_.src_mem.reset(
new memory(context_.fwd_pd.get()->src_desc(), cpu_engine_, DummyData));
context_.filter_mem.reset(new memory(context_.fwd_pd.get()->weights_desc(),
cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(context_.fwd_pd.get()->dst_desc(), cpu_engine_, DummyData));
context_.conv_fwd.reset(new convolution_forward(*context_.fwd_pd));
auto scratchpad_md = context_.fwd_pd->scratchpad_desc();
context_.sp_mem.reset(
new dnnl::memory(scratchpad_md, cpu_engine_, DummyData));
std::unordered_map<int, memory> net_args;
if (!convFwdDims.bias_dims.empty()) {
context_.bias_mem.reset(new memory(context_.fwd_pd.get()->bias_desc(),
cpu_engine_, DummyData));
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_BIAS, *context_.bias_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
net_args.insert(
{{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},
{ DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,
*context_.dst_scale_mem }});
}
#endif
} else if (!convFwdDims.fuse_bn_dims.empty()) {
context_.bn_scale_mem.reset(
new memory(*context_.bn_scale_md, cpu_engine_, DummyData));
context_.bn_mean_mem.reset(
new memory(*context_.bn_mean_md, cpu_engine_, DummyData));
context_.bn_offset_mem.reset(
new memory(*context_.bn_offset_md, cpu_engine_, DummyData));
context_.bn_rsqrt_mem.reset(
new memory(*context_.bn_rsqrt_md, cpu_engine_, DummyData));
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(0) | DNNL_ARG_SRC_1,
*context_.bn_mean_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(1) | DNNL_ARG_SRC_1,
*context_.bn_rsqrt_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(2) | DNNL_ARG_SRC_1,
*context_.bn_scale_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(3) | DNNL_ARG_SRC_1,
*context_.bn_offset_mem}};
} else {
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
net_args.insert(
{{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},
{ DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,
*context_.dst_scale_mem }});
}
#endif
}
context_.fwd_primitives_args.push_back(net_args);
context_.fwd_primitives.push_back(*context_.conv_fwd);
}
struct ConvFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
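// Factory that creates and caches MklConvFwdPrimitive objects keyed on a
// string built from MklConvFwdParams, so identical convolutions can reuse
// the same primitive.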
template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitiveFactory : public MklPrimitiveFactory<float> {
public:
static MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* Get(
const MklConvFwdParams& convFwdDims, bool do_not_cache) {
MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* conv_fwd = nullptr;
if (do_not_cache) {
conv_fwd =
new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(convFwdDims);
} else {
conv_fwd =
dynamic_cast<MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>*>(
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
Toutput>::GetInstance()
.GetConvFwd(convFwdDims));
if (conv_fwd == nullptr) {
conv_fwd = new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(
convFwdDims);
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
Toutput>::GetInstance()
.SetConvFwd(convFwdDims, conv_fwd);
}
}
return conv_fwd;
}
private:
MklConvFwdPrimitiveFactory() {}
~MklConvFwdPrimitiveFactory() {}
static const int kDilationH = 0, kDilationW = 1;
static MklConvFwdPrimitiveFactory& GetInstance() {
static MklConvFwdPrimitiveFactory instance_;
return instance_;
}
static string CreateKey(const MklConvFwdParams& convFwdDims) {
string prefix = "conv_fwd_";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(convFwdDims.src_dims);
key_creator.AddAsKey(convFwdDims.filter_dims);
key_creator.AddAsKey(convFwdDims.bias_dims);
key_creator.AddAsKey(convFwdDims.dst_dims);
key_creator.AddAsKey(convFwdDims.strides);
key_creator.AddAsKey(convFwdDims.dilations);
key_creator.AddAsKey(convFwdDims.padding_left);
key_creator.AddAsKey(convFwdDims.padding_right);
key_creator.AddAsKey(convFwdDims.dtypes);
if (convFwdDims.native_format) {
key_creator.AddAsKey(convFwdDims.tf_fmt);
}
for (auto const& post_op_param : convFwdDims.post_op_params) {
key_creator.AddAsKey(post_op_param.name);
if (post_op_param.name == "activation") {
key_creator.AddAsKey(post_op_param.alg);
DCHECK_EQ(post_op_param.param.size(), 3);
for (auto& param : post_op_param.param) {
key_creator.AddAsKey(param);
}
} else if (post_op_param.name == "sum") {
DCHECK_EQ(post_op_param.param.size(), 1);
for (auto& param : post_op_param.param) {
key_creator.AddAsKey(param);
}
#ifndef ENABLE_ONEDNN_V3
} else if (post_op_param.name == "output_scale") {
#else
} else if (post_op_param.name == "src_scale" ||
post_op_param.name == "wei_scale" ||
post_op_param.name == "dst_scale") {
#endif
key_creator.AddAsKey(post_op_param.partial_key);
} else if (post_op_param.name == "fuse_bn") {
key_creator.AddAsKey(post_op_param.name);
key_creator.AddAsKey(convFwdDims.fuse_bn_dims);
} else {
return string("not_a_key");
}
}
return key_creator.GetKey();
}
MklPrimitive* GetConvFwd(const MklConvFwdParams& convFwdDims) {
string key = CreateKey(convFwdDims);
return this->GetOp(key);
}
void SetConvFwd(const MklConvFwdParams& convFwdDims, MklPrimitive* op) {
string key = CreateKey(convFwdDims);
this->SetOp(key, op);
}
};
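// Base OpKernel for oneDNN-accelerated convolution. The boolean template
// parameters select bias fusion, fused Pad, depthwise convolution, and
// whether tensors are in native TF layout (no MKL layout metadata); derived
// kernels enable further fusions through the protected setters below.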
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool bias_enabled, bool pad_enabled, bool is_depthwise,
bool native_format>
class MklConvOp : public OpKernel {
public:
~MklConvOp() {}
explicit MklConvOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_));
OP_REQUIRES(
context,
!(context->HasAttr("padding_list") &&
context->HasAttr("explicit_paddings")),
absl::InvalidArgumentError("Can only have 1 `padding` list at most"));
if (context->HasAttr("padding_list")) {
OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list_));
}
if (context->HasAttr("explicit_paddings")) {
OP_REQUIRES_OK(context,
context->GetAttr("explicit_paddings", &padding_list_));
}
OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_));
OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format_str_));
OP_REQUIRES(context, FormatFromString(data_format_str_, &data_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES(context, (strides_.size() == 4 || strides_.size() == 5),
absl::InvalidArgumentError("Sliding window strides field must "
"specify 4 or 5 dimensions"));
const int64 stride_n = GetTensorDim(strides_, data_format_, 'N');
const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');
OP_REQUIRES(
context, stride_n == 1 && stride_c == 1,
absl::UnimplementedError("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
is_filter_const_ = false;
if (AreWeightsFrozen()) {
is_filter_const_ = true;
} else if (context->HasAttr("is_filter_const")) {
OP_REQUIRES_OK(context,
context->GetAttr("is_filter_const", &is_filter_const_));
}
if (strides_.size() == 4) {
OP_REQUIRES(
context, dilations_.size() == 4,
absl::InvalidArgumentError("Sliding window dilations field must "
"specify 4 dimensions"));
const int64 dilation_n = GetTensorDim(dilations_, data_format_, 'N');
const int64 dilation_c = GetTensorDim(dilations_, data_format_, 'C');
const int64 dilation_h = GetTensorDim(dilations_, data_format_, 'H');
const int64 dilation_w = GetTensorDim(dilations_, data_format_, 'W');
OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1,
absl::InvalidArgumentError(
"Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
OP_REQUIRES(
context, dilation_h > 0 && dilation_w > 0,
absl::InvalidArgumentError("Dilated rates should be larger than 0."));
} else if (strides_.size() == 5) {
OP_REQUIRES(context, dilations_.size() == 5,
absl::InvalidArgumentError("Dilation rates field must "
"specify 5 dimensions"));
OP_REQUIRES(context,
(GetTensorDim(dilations_, data_format_, 'N') == 1 &&
GetTensorDim(dilations_, data_format_, 'C') == 1),
absl::InvalidArgumentError(
"Current implementation does not yet support "
"dilations rates in the batch and depth dimensions."));
OP_REQUIRES(
context,
(GetTensorDim(dilations_, data_format_, '0') > 0 &&
GetTensorDim(dilations_, data_format_, '1') > 0 &&
GetTensorDim(dilations_, data_format_, '2') > 0),
absl::InvalidArgumentError("Dilated rates should be larger than 0."));
}
}
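  // Validates shapes and attributes, obtains (or creates) the matching
  // forward primitive, reorders src/filter into the primitive's preferred
  // formats if needed, and executes the convolution.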
void Compute(OpKernelContext* context) override {
try {
const Tensor& src_tensor = MklGetInput(context, kInputIndex_Src);
const Tensor& filter_tensor = MklGetInput(context, kInputIndex_Filter);
OP_REQUIRES(
context, filter_tensor.NumElements() > 0,
absl::InvalidArgumentError("filter must not have zero elements "
"(i.e. all dimensions must be non-zero)"));
if (std::is_same<Tinput, float>::value) {
(void)SetFPMathMode();
}
MklDnnShape src_mkl_shape, filter_mkl_shape;
GetMklShape(context, kInputIndex_Src, &src_mkl_shape, native_format);
GetMklShape(context, kInputIndex_Filter, &filter_mkl_shape,
native_format);
OP_REQUIRES(context, !filter_mkl_shape.IsMklTensor(),
absl::InvalidArgumentError("Filter should not be in "
"Mkl Layout"));
MklDnnData<Tinput> src(&cpu_engine_);
MklDnnData<Tfilter> filter(&cpu_engine_);
memory::dims src_dims, filter_dims, padding_left, padding_right,
dilations, strides;
memory::dims dst_dims_tf_order, dst_dims_mkl_order;
bool pad_attr_enabled = false;
for (auto const& padding_val : padding_list_) {
if (padding_val) {
pad_attr_enabled = true;
break;
}
}
if (fuse_pad_ || pad_attr_enabled) {
PadWithConvFusion(context, padding_left, padding_right,
pad_attr_enabled, data_format_str_);
}
MklDnnConvUtil conv_utl(context, strides_, padding_, data_format_,
dilations_);
auto src_tf_shape = GetTfShape(context, kInputIndex_Src, native_format);
auto filter_tf_shape =
GetTfShape(context, kInputIndex_Filter, native_format);
bool is_grouped_convolution = false;
conv_utl.GetConvFwdSizesInMklOrder(
src_tf_shape, filter_tf_shape, &src_dims, &filter_dims, &strides,
&dilations, &dst_dims_tf_order, &dst_dims_mkl_order, &padding_left,
&padding_right, &is_grouped_convolution,
(fuse_pad_ || pad_attr_enabled), is_depthwise);
if (!context->status().ok()) return;
TensorShape dst_tf_shape = MklDnnDimsToTFShape(dst_dims_tf_order);
Tensor* dst_tensor = nullptr;
bool emit_filter_output = (typeid(Tinput) == typeid(Tfilter) &&
typeid(Tinput) == typeid(Toutput) &&
(typeid(Tinput) == typeid(float) ||
typeid(Tinput) == typeid(bfloat16))) &&
!native_format;
if (dst_tf_shape.num_elements() == 0 || dst_dims_tf_order[0] == 0) {
MklDnnShape dst_mkl_shape;
dst_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor,
src_tf_shape, dst_mkl_shape, native_format);
filter_mkl_shape.SetMklTensor(false);
Tensor* output_filter_tensor = nullptr;
if (emit_filter_output) {
filter_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, kOutputIndex_Filter,
&output_filter_tensor, filter_tf_shape,
filter_mkl_shape);
}
return;
}
bool is_conv2d = (strides_.size() == 4);
bool is_conv3d = (strides_.size() == 5);
if (!is_conv2d && !is_conv3d) {
OP_REQUIRES(context, !pad_enabled,
absl::InvalidArgumentError(
"Pad + Conv fusion only works for 2D/3D"));
OP_REQUIRES(
context, !fuse_pad_,
absl::InvalidArgumentError("Pad+Conv fusion only works for 2D/3D"));
}
if (is_depthwise) {
OP_REQUIRES(context, is_conv2d,
absl::InvalidArgumentError(
"Only 2D convolution is supported for depthwise."));
}
auto tf_fmt = is_conv2d ? TFDataFormatToMklDnnDataFormat(data_format_)
: TFDataFormatToMklDnn3DDataFormat(data_format_);
auto mkl_fmt_tag = MklTensorFormatToMklDnnDataFormat(tf_fmt);
OP_REQUIRES(context, mkl_fmt_tag != memory::format_tag::undef,
absl::InvalidArgumentError("Invalid data format"));
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<Tinput>(), mkl_fmt_tag);
src.SetUsrMem(src_md, &src_tensor);
auto filter_format = is_conv2d ? ((is_depthwise || is_grouped_convolution)
? memory::format_tag::hwigo
: memory::format_tag::hwio)
: memory::format_tag::dhwio;
DCHECK(!filter_mkl_shape.IsMklTensor());
auto filter_md =
filter_mkl_shape.IsMklTensor()
? filter_mkl_shape.GetMklLayout()
: memory::desc(filter_dims, MklDnnType<Tfilter>(), filter_format);
filter.SetUsrMem(filter_md, &filter_tensor);
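      // oneDNN dilations start from 0 while TensorFlow dilations start from 1,
      // so subtract 1 from each dimension before building the primitive.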
for (int i = 0; i < dilations.size(); ++i) --dilations[i];
bool do_not_cache =
MklPrimitiveFactory<Tinput>::IsPrimitiveMemOptEnabled() &&
(src_dims[MklDnnDims::Dim_N] > kSmallBatchSize) &&
(MklPrimitiveFactory<Tinput>::IsLegacyPlatform() ||
IsConv1x1StrideNot1(filter_dims, strides));
MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Ttemp_output>* conv_fwd =
nullptr;
memory::dims bias_dims = {};
if (fuse_biasadd_) {
conv_utl.GetBiasSizeInMklOrder(kInputIndex_Bias, &bias_dims);
}
memory::dims fuse_bn_dims = {};
TensorShape fuse_bn_shape;
if (fuse_bn_) {
fuse_bn_shape = MklGetInput(context, kInputIndex_BN_Mean).shape();
OP_REQUIRES(context, fuse_bn_shape.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("FusedBatchNorm must be 1D, not: ",
fuse_bn_shape.DebugString())));
fuse_bn_dims = {1, fuse_bn_shape.dim_size(0), 1, 1};
}
MklConvFwdParams convFwdDims(
src_dims, filter_dims, fuse_biasadd_ ? bias_dims : NONE_DIMS,
dst_dims_mkl_order, strides, dilations, padding_left, padding_right,
fuse_bn_dims, tf_fmt, native_format, is_depthwise, is_filter_const_);
this->ExtendConvFwdParams(context, convFwdDims);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
conv_fwd =
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias, Ttemp_output>::Get(
convFwdDims, do_not_cache);
MklDnnShape output_mkl_shape;
std::shared_ptr<ConvFwdPd> conv_fwd_pd = conv_fwd->GetPrimitiveDesc();
AllocateOutputTensor(context, *conv_fwd_pd, dst_dims_mkl_order, tf_fmt,
&output_mkl_shape, &dst_tensor);
Tensor* filter_out_tensor = nullptr;
if (emit_filter_output) {
AllocateFilterOutputTensor(context, *conv_fwd_pd,
TFShapeToMklDnnDims(filter_tf_shape),
&filter_out_tensor);
}
Ttemp_output* dst_data =
reinterpret_cast<Ttemp_output*>(dst_tensor->flat<Toutput>().data());
Tinput* src_data = nullptr;
if (src_md != conv_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(conv_fwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<Tinput*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<Tinput*>(
const_cast<Tinput*>(src_tensor.flat<Tinput>().data()));
}
Tfilter* filter_data = nullptr;
if (filter_md != conv_fwd_pd->weights_desc()) {
bool is_filter_cached = false;
if (is_filter_const_) {
if (IsFilterCacheEmpty(context)) {
CacheFilter(context, conv_fwd_pd, filter_data, filter_tensor,
filter, filter_md, filter_mkl_shape);
}
filter_data = GetCachedFilter(context, conv_fwd_pd->weights_desc());
is_filter_cached = (filter_data != nullptr);
}
if (!is_filter_cached) {
filter.SetUsrMem(filter_md, &filter_tensor);
if (filter_out_tensor == nullptr) {
filter.CheckReorderToOpMem(conv_fwd_pd->weights_desc(), cpu_engine_,
context);
} else {
filter.CheckReorderToOpMem(
conv_fwd_pd->weights_desc(),
filter.GetTensorBuffer(filter_out_tensor), cpu_engine_,
context);
}
filter_data =
static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
}
} else {
filter_data = static_cast<Tfilter*>(
const_cast<Tfilter*>(filter_tensor.flat<Tfilter>().data()));
}
UserScratchPad<unsigned char> scratch_pad;
scratch_pad.AllocateSPTensor(conv_fwd, context);
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, conv_fwd->GetEngine()));
if (fuse_biasadd_) {
const Tensor& bias_tensor = MklGetInput(context, kInputIndex_Bias);
void* bias_data =
this->GetBiasHandle(context, conv_fwd_pd, bias_tensor);
conv_fwd->Execute(src_data, filter_data, bias_data, dst_data,
convFwdDims, fwd_cpu_stream, scratch_pad.Get());
} else if (fuse_bn_) {
const Tensor& bn_scale_tensor =
MklGetInput(context, kInputIndex_BN_Scale);
Tinput* bn_scale_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_scale_tensor.flat<Tinput>().data()));
const Tensor& bn_mean_tensor =
MklGetInput(context, kInputIndex_BN_Mean);
Tinput* bn_mean_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_mean_tensor.flat<Tinput>().data()));
const Tensor& bn_offset_tensor =
MklGetInput(context, kInputIndex_BN_Offset);
Tinput* bn_offset_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_offset_tensor.flat<Tinput>().data()));
Tensor bn_rsqrt_tensor;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<Tinput>::v(),
fuse_bn_shape, &bn_rsqrt_tensor));
Tinput* bn_rsqrt_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_rsqrt_tensor.flat<Tinput>().data()));
this->ComputeBNScale(context, epsilon_, kInputIndex_BN_Variance,
bn_rsqrt_data);
conv_fwd->Execute(src_data, filter_data, nullptr, dst_data,
bn_scale_data, bn_mean_data, bn_offset_data,
bn_rsqrt_data, convFwdDims, fwd_cpu_stream,
scratch_pad.Get());
} else {
conv_fwd->Execute(src_data, filter_data, dst_data, convFwdDims,
fwd_cpu_stream, scratch_pad.Get());
}
if (do_not_cache) delete conv_fwd;
} catch (dnnl::error& e) {
string error_msg = tensorflow::strings::StrCat(
"Status: ", e.status, ", message: ", string(e.message), ", in file ",
__FILE__, ":", __LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
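  // Computes the left/right padding dims for a fused Pad, reading paddings
  // either from the "padding_list"/"explicit_paddings" attribute or from the
  // paddings input tensor.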
void PadWithConvFusion(OpKernelContext* context, memory::dims& padding_left,
memory::dims& padding_right, bool pad_attr_enabled,
string data_format_str_) {
Tpadding* paddings = nullptr;
if (pad_attr_enabled) {
paddings = padding_list_.data();
} else {
const Tensor& paddings_tf = MklGetInput(context, input_index_pad_);
OP_REQUIRES(context, paddings_tf.dims() == 2,
absl::InvalidArgumentError(
absl::StrCat("paddings must be 2-dimensional: ",
paddings_tf.shape().DebugString())));
paddings = static_cast<Tpadding*>(
const_cast<Tpadding*>(paddings_tf.flat<Tpadding>().data()));
}
int64 pad_top = 0, pad_left = 0, pad_front = 0;
int64 pad_bottom = 0, pad_right = 0, pad_back = 0;
if (data_format_str_ == "NHWC") {
pad_top = paddings[2];
pad_bottom = paddings[3];
pad_left = paddings[4];
pad_right = paddings[5];
} else if (data_format_str_ == "NCHW") {
pad_top = paddings[4];
pad_bottom = paddings[5];
pad_left = paddings[6];
pad_right = paddings[7];
} else if (data_format_str_ == "NDHWC") {
pad_front = paddings[2];
pad_back = paddings[3];
pad_top = paddings[4];
pad_bottom = paddings[5];
pad_left = paddings[6];
pad_right = paddings[7];
} else if (data_format_str_ == "NCDHW") {
pad_front = paddings[4];
pad_back = paddings[5];
pad_top = paddings[6];
pad_bottom = paddings[7];
pad_left = paddings[8];
pad_right = paddings[9];
}
if (data_format_str_ == "NHWC" || data_format_str_ == "NCHW") {
padding_left = {static_cast<int>(pad_top), static_cast<int>(pad_left)};
padding_right = {static_cast<int>(pad_bottom),
static_cast<int>(pad_right)};
} else if (data_format_str_ == "NDHWC" || data_format_str_ == "NCDHW") {
padding_left = {static_cast<int>(pad_front), static_cast<int>(pad_top),
static_cast<int>(pad_left)};
padding_right = {static_cast<int>(pad_back), static_cast<int>(pad_bottom),
static_cast<int>(pad_right)};
}
}
protected:
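  // Setters used by derived kernels to configure which ops are fused into
  // the convolution primitive (bias, activation, pad, add/sum, batch norm).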
void set_input_add_idx(int input_add_idx) {
input_index_add_ = input_add_idx;
}
int get_input_add_idx() { return input_index_add_; }
void set_fuse_biasadd(bool fuse_biasadd) { fuse_biasadd_ = fuse_biasadd; }
bool get_fuse_biasadd() { return fuse_biasadd_; }
void set_fuse_activation(bool fuse_activation, dnnl::algorithm activation_alg,
float alpha_or_upbound = 0.0, float beta = 0.0) {
fuse_activation_ = fuse_activation;
activation_alg_ = activation_alg;
alpha_or_upbound_ = alpha_or_upbound;
beta_ = beta;
}
void set_fuse_pad(bool fuse_pad) {
fuse_pad_ = fuse_pad;
if (fuse_bn_) {
input_index_pad_ = 6;
} else if (fuse_add_ && fuse_biasadd_) {
input_index_pad_ = 4;
} else {
input_index_pad_ = 3;
}
}
void set_fuse_add(bool fuse_add) { fuse_add_ = fuse_add; }
  bool get_fuse_add() { return fuse_add_; }
void set_fuse_bn(bool fuse_bn, float epsilon) {
fuse_bn_ = fuse_bn;
epsilon_ = epsilon;
}
virtual void ComputeBNScale(OpKernelContext* context, float epsilon,
int bn_variance_index, Tinput* scale_buf_ptr) {
OP_REQUIRES(context, false,
absl::UnimplementedError(
"Compute BN scale not expected in base class"));
return;
}
virtual void ExtendConvFwdParams(OpKernelContext* context,
MklConvFwdParams& params) {
params.dtypes.append(typeid(Tinput).name());
params.dtypes.append(typeid(Tfilter).name());
params.dtypes.append(typeid(Tbias).name());
params.dtypes.append(typeid(Toutput).name());
bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
std::is_same<Tinput, qint8>::value;
if (!is_quantized_input) {
if (fuse_add_) {
params.post_op_params.push_back(
{"sum", dnnl::algorithm::undef, {1.0}, ""});
}
if (fuse_bn_) {
params.post_op_params.push_back(
{"fuse_bn", dnnl::algorithm::undef, {1.0}, ""});
}
if (fuse_activation_) {
params.post_op_params.push_back({"activation",
activation_alg_,
{1.0, alpha_or_upbound_, beta_},
""});
}
}
}
virtual void* GetBiasHandle(OpKernelContext* context,
std::shared_ptr<ConvFwdPd>& conv2d_fwd_pd,
const Tensor& bias_tensor) {
if (fuse_biasadd_) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
return nullptr;
}
virtual void AllocateOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& output_dims_mkl_order,
MklTensorFormat output_tf_format,
MklDnnShape* output_mkl_shape,
Tensor** output_tensor) {
DCHECK(output_tensor);
#ifndef ENABLE_ONEDNN_V3
auto dst_md = conv_prim_desc.dst_desc();
if (!std::is_same<Ttemp_output, Toutput>::value) {
#ifndef ENABLE_ONEDNN_V3
dst_md.data.data_type =
static_cast<dnnl_data_type_t>(MklDnnType<Toutput>());
#else
dst_md =
memory::desc(output_dims_mkl_order, MklDnnType<Toutput>(),
MklTensorFormatToMklDnnDataFormat(output_tf_format));
#endif
}
#else
auto dst_md =
std::is_same<Ttemp_output, Toutput>::value
? conv_prim_desc.dst_desc()
: memory::desc(conv_prim_desc.dst_desc().get_dims(),
MklDnnType<Toutput>(),
MklTensorFormatToMklDnnDataFormat(output_tf_format));
#endif
output_mkl_shape->SetMklTensor(true);
output_mkl_shape->SET_MKL_LAYOUT(dst_md);
output_mkl_shape->SetElemType(MklDnnType<Toutput>());
output_mkl_shape->SetTfLayout(output_dims_mkl_order.size(),
output_dims_mkl_order, output_tf_format);
TensorShape output_tf_shape;
output_tf_shape.AddDim((dst_md.get_size() / sizeof(Toutput)));
if (native_format) {
output_tf_shape = output_mkl_shape->GetTfShape();
}
bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
std::is_same<Tinput, qint8>::value;
if (fuse_add_ && !is_quantized_input) {
const Tensor& add_tensor = MklGetInput(context, input_index_add_);
MklDnnShape add_mkl_shape;
GetMklShape(context, input_index_add_, &add_mkl_shape, native_format);
if (native_format && context->forward_input_to_output_with_shape(
input_index_add_, kOutputIndex_Dst,
output_tf_shape, output_tensor)) {
return;
}
if (!native_format && add_mkl_shape == *output_mkl_shape &&
ForwardMklTensorInToOutWithMklShape(context, input_index_add_,
kOutputIndex_Dst, output_tensor,
add_mkl_shape, false)) {
return;
} else {
AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
output_tf_shape, *output_mkl_shape,
native_format);
auto output_format_tag = MklTensorFormatToMklDnnDataFormat(
output_mkl_shape->GetTfDataFormat());
OP_REQUIRES(context, output_format_tag != memory::format_tag::undef,
absl::InvalidArgumentError(
"MklConvOp: AddN fusion: Invalid data format"));
auto add_md =
add_mkl_shape.IsMklTensor()
? add_mkl_shape.GetMklLayout()
: memory::desc(output_dims_mkl_order, MklDnnType<Toutput>(),
output_format_tag);
void* add_buf = static_cast<void*>(
const_cast<Toutput*>(add_tensor.flat<Toutput>().data()));
void* dst_buf =
static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data());
if (native_format) {
add_md = dst_md =
memory::desc({add_tensor.NumElements()}, MklDnnType<Toutput>(),
dnnl::memory::format_tag::x);
}
fuse_add_src_.reset(new memory(add_md, this->cpu_engine_, add_buf));
fuse_add_dst_.reset(new memory(dst_md, this->cpu_engine_, dst_buf));
auto reorder_desc =
ReorderPd(this->cpu_engine_, add_md, this->cpu_engine_, dst_md);
CreateAndExecuteReorder(reorder_desc, *fuse_add_src_, *fuse_add_dst_,
this->cpu_engine_, context);
}
} else {
AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
output_tf_shape, *output_mkl_shape,
native_format);
}
}
engine cpu_engine_ = engine(engine::kind::cpu, 0);
private:
std::shared_ptr<dnnl::memory> fuse_add_src_;
std::shared_ptr<dnnl::memory> fuse_add_dst_;
std::vector<int32> strides_;
std::vector<int32> dilations_;
std::vector<Tpadding> padding_list_;
bool is_filter_const_;
mutex mu_;
Padding padding_;
string data_format_str_;
TensorFormat data_format_;
Tensor cached_filter_data_ TF_GUARDED_BY(mu_);
#ifndef ENABLE_ONEDNN_V3
Tensor cached_filter_md_ TF_GUARDED_BY(mu_);
#else
FilterMemoryDesc cached_filter_md_ TF_GUARDED_BY(mu_);
#endif
bool fuse_biasadd_ = bias_enabled;
bool fuse_activation_ = false;
bool fuse_pad_ = pad_enabled;
bool fuse_add_ = false;
bool fuse_bn_ = false;
float epsilon_ = 0.0001;
float alpha_or_upbound_ = 0.0;
float beta_ = 0.0;
dnnl::algorithm activation_alg_ = dnnl::algorithm::undef;
int input_index_pad_ = 2;
int input_index_add_ = 3;
const int kInputIndex_Src = 0, kInputIndex_Filter = 1, kInputIndex_Bias = 2;
const int kOutputIndex_Dst = 0, kOutputIndex_Filter = 1;
const int kDilationH = 0, kDilationW = 1;
const int kInputIndex_BN_Scale = 2, kInputIndex_BN_Offset = 3;
const int kInputIndex_BN_Mean = 4, kInputIndex_BN_Variance = 5;
MklTensorFormat GetFilterTfDataFormat(const MklDnnShape* filter_mkl_shape,
const ConvFwdPd& conv_prim_desc) const {
DCHECK(filter_mkl_shape);
return filter_mkl_shape->GetTfDataFormat();
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** filter_tensor,
const MklDnnShape* filter_mkl_shape)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
DCHECK(filter_tensor);
TensorShape filter_tf_shape;
filter_tf_shape.AddDim(
(conv_prim_desc.weights_desc().get_size() / sizeof(Tfilter)));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<Tfilter>::value,
filter_tf_shape, &cached_filter_data_));
*filter_tensor = &cached_filter_data_;
memory::desc weights_desc = conv_prim_desc.weights_desc();
#ifndef ENABLE_ONEDNN_V3
TensorShape cached_filter_md_shape;
cached_filter_md_shape.AddDim(sizeof(weights_desc) / sizeof(uint8));
OP_REQUIRES_OK(context,
context->allocate_temp(DT_UINT8, cached_filter_md_shape,
&cached_filter_md_));
*reinterpret_cast<memory::desc*>(cached_filter_md_.flat<uint8>().data()) =
weights_desc;
#else
cached_filter_md_ = FilterMemoryDesc(
weights_desc.get_ndims(), weights_desc.get_inner_nblks(),
weights_desc.get_data_type(), weights_desc.get_dims(),
weights_desc.get_inner_blks(), weights_desc.get_inner_idxs(),
weights_desc.get_strides());
#endif
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** filter_tensor) {
AllocateTensor(context, conv_prim_desc, filter_tensor, nullptr);
}
void AllocateFilterOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& filter_dims_tf_order,
Tensor** filter_tensor) {
DCHECK(filter_tensor);
auto filter_md = conv_prim_desc.weights_desc();
MklDnnShape filter_mkl_shape;
filter_mkl_shape.SetMklTensor(true);
filter_mkl_shape.SET_MKL_LAYOUT(filter_md);
filter_mkl_shape.SetElemType(MklDnnType<Tfilter>());
filter_mkl_shape.SetTfLayout(filter_dims_tf_order.size(),
filter_dims_tf_order,
MklTensorFormat::FORMAT_BLOCKED);
TensorShape filter_tf_shape;
filter_tf_shape.AddDim((filter_md.get_size() / sizeof(Tfilter)));
AllocateOutputSetMklShape(context, kOutputIndex_Filter, filter_tensor,
filter_tf_shape, filter_mkl_shape);
}
inline bool IsFilterCacheEmpty(OpKernelContext* context)
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
const Tensor& cached_filter_data_tensor = cached_filter_data_;
return (cached_filter_data_tensor.NumElements() == 0);
}
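  // Caches the filter after it has been reordered into the primitive's
  // preferred format, so constant filters are reordered only once.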
void CacheFilter(OpKernelContext* context,
const std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
Tfilter* filter_data, const Tensor& filter_tensor,
MklDnnData<Tfilter>& filter, const memory::desc& filter_md,
const MklDnnShape& filter_mkl_shape) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
const Tensor& cached_filter_data_tensor = cached_filter_data_;
if (cached_filter_data_tensor.NumElements() > 0) {
return;
}
#ifdef ENABLE_ONEDNN_V3
if (filter_md.get_format_kind() != memory::format_kind::blocked) {
return;
}
#endif
filter.SetUsrMem(filter_md, &filter_tensor);
filter.CheckReorderToOpMem(conv_fwd_pd.get()->weights_desc(),
this->cpu_engine_, context);
filter_data = static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
Tensor* filter_tensor_ptr = nullptr;
AllocateTensor(context, *conv_fwd_pd, &filter_tensor_ptr,
&filter_mkl_shape);
void* cached_filter_data = filter.GetTensorBuffer(filter_tensor_ptr);
size_t cached_filter_data_size = filter.GetOpMem().get_desc().get_size();
memcpy(cached_filter_data, filter_data, cached_filter_data_size);
}
#ifndef ENABLE_ONEDNN_V3
bool AreMemoryDescriptorsEqual(const memory::desc& filter_md,
const Tensor& cached_filter_md) {
auto filter_md_data = filter_md.data;
const char* filter_data = reinterpret_cast<const char*>(&filter_md_data);
auto cached_filter_md_data = cached_filter_md.scalar<int64_t>()();
const char* cached_filter_data =
reinterpret_cast<const char*>(&cached_filter_md_data);
for (size_t i = 0; i < sizeof(filter_md_data); ++i) {
if (*filter_data++ != *cached_filter_data++) {
return false;
}
}
return true;
}
#endif
Tfilter* GetCachedFilter(OpKernelContext* context,
const memory::desc& filter_md)
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
const Tensor& cached_filter_data = cached_filter_data_;
#ifndef ENABLE_ONEDNN_V3
const Tensor& cached_filter_md = cached_filter_md_;
if (filter_md == *static_cast<memory::desc*>(cached_filter_md.data())) {
return static_cast<Tfilter*>(
const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
}
return nullptr;
#else
if (cached_filter_md_ ==
FilterMemoryDesc(filter_md.get_ndims(), filter_md.get_inner_nblks(),
filter_md.get_data_type(), filter_md.get_dims(),
filter_md.get_inner_blks(), filter_md.get_inner_idxs(),
filter_md.get_strides())) {
return static_cast<Tfilter*>(
const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
}
return nullptr;
#endif
}
};
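// Kernel for fused Conv2D variants: maps the "fused_ops" attribute onto the
// fusion setters of the base MklConvOp.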
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool native_format>
class MklFusedConvOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format> {
public:
explicit MklFusedConvOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format>(context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
OP_REQUIRES(context, !fused_ops.empty(),
absl::InvalidArgumentError(
"Fused Conv2D must have at least one fused op."));
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"Relu"}) {
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"Relu6"}) {
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"Elu"}) {
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"LeakyRelu"}) {
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops == std::vector<string>{"FusedBatchNorm"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,
1.0 / 6.0, 0.5);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu6"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Elu"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops ==
std::vector<string>{"FusedBatchNorm", "LeakyRelu"}) {
float epsilon, leakyrelu_alpha;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops ==
std::vector<string>{"FusedBatchNorm", "_MklSwish"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops ==
std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"_FusedConv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "_MklSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else {
OP_REQUIRES(context, false,
absl::UnimplementedError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
if (pad_enabled) {
this->set_fuse_pad(true);
}
}
void ComputeBNScale(OpKernelContext* context, float epsilon,
int bn_variance_index, Tinput* scale_buf_ptr) override {
const Tensor& bn_var_tensor = MklGetInput(context, bn_variance_index);
Eigen::Tensor<Tinput, 1, Eigen::RowMajor> bn_rsqrt =
(bn_var_tensor.flat<Tinput>() + static_cast<Tinput>(epsilon)).rsqrt();
Tinput* bn_rsqrt_data = bn_rsqrt.data();
int64_t num_elem = bn_var_tensor.shape().dim_size(0);
for (int64_t i = 0; i < num_elem; i++) {
scale_buf_ptr[i] = bn_rsqrt_data[i];
}
return;
}
virtual ~MklFusedConvOp() {}
};
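// Kernel for fused depthwise Conv2D variants; only BiasAdd (optionally
// followed by an activation) fusions are supported.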
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool bias_enabled, bool is_depthwise,
bool native_format>
class MklFusedDepthwiseConvOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, bias_enabled, false, is_depthwise,
native_format> {
public:
explicit MklFusedDepthwiseConvOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, bias_enabled, false, is_depthwise, native_format>(
context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
OP_REQUIRES(context, !fused_ops.empty(),
absl::InvalidArgumentError(
"Fused DepthwiseConv2D must have at least one fused op."));
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,
1.0 / 6.0, 0.5);
} else {
OP_REQUIRES(context, false,
absl::InvalidArgumentError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
OP_REQUIRES(
context, num_args == 1,
absl::InvalidArgumentError(
"Fused DepthwiseConv2D must have one extra argument: bias."));
if (pad_enabled) {
this->set_fuse_pad(true);
}
}
virtual ~MklFusedDepthwiseConvOp() {}
};
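// Bit flags recording which ops are fused into a quantized convolution.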
enum class oneDNNFusedOps { kBias = 1, kSum = 2, kRelu = 4, kRequantize = 8 };
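// OpKernel for quantized convolution. Fused ops may be supplied either via
// the "fused_ops" attribute (new API) or via the legacy_fused_ops /
// num_fused_ops template arguments (old API), but not both at once.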
template <typename Device, typename Tinput, typename Tbias, typename Toutput,
typename Ttemp_output, bool is_depthwise, string legacy_fused_ops[],
int num_fused_ops>
class MklQuantizedConvOp
: public MklConvOp<
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32, false, false,
is_depthwise, true> {
public:
virtual ~MklQuantizedConvOp() {
if (this->input_bias_ != nullptr) {
delete this->input_bias_;
input_bias_ = nullptr;
}
if (this->scaled_bias_ != nullptr) {
delete this->scaled_bias_;
scaled_bias_ = nullptr;
}
}
explicit MklQuantizedConvOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, qint8, Tbias, Toutput,
Ttemp_output, int32,
false, false, is_depthwise,
true>(context) {
std::vector<std::vector<string>> supported_fusions = {
{"BiasAdd"},
{"Relu"},
{"Requantize"},
{"BiasAdd", "Relu"},
{"BiasAdd", "Requantize"},
{"Relu", "Requantize"},
{"BiasAdd", "Relu", "Requantize"},
{"BiasAdd", "Sum", "Relu"},
{"BiasAdd", "Sum", "Relu", "Requantize"}};
std::vector<string> fused_ops_attr;
if (context->HasAttr("fused_ops")) {
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops_attr));
}
OP_REQUIRES(context, !(fused_ops_attr.size() > 0 && num_fused_ops > 0),
                absl::InvalidArgumentError(
                    "QuantizedConv fused ops must be specified through either "
                    "the new API or the old API, not both."));
if (fused_ops_attr.size() > 0) {
fused_ops_ = fused_ops_attr;
} else if (num_fused_ops > 0) {
for (int i = 0; i < num_fused_ops; ++i) {
fused_ops_.push_back(legacy_fused_ops[i]);
}
}
if (fused_ops_.size() > 0) {
bool is_fusion_supported =
std::find(supported_fusions.begin(), supported_fusions.end(),
fused_ops_) != supported_fusions.end();
OP_REQUIRES(context, is_fusion_supported,
absl::InvalidArgumentError(
absl::StrCat("Unsupported QuantizedConv fusion: [",
absl::StrJoin(fused_ops_, ","), "]")));
}
for (const auto& op : fused_ops_) {
fused_op_flags_ ^= static_cast<int64_t>(StrToEnum(op));
}
DataType bias_dt, summand_dt, out_dt;
if (IsFused(oneDNNFusedOps::kBias)) {
this->set_fuse_biasadd(true);
OP_REQUIRES_OK(context,
context->GetAttr("is_bias_const", &is_bias_const_));
if (context->HasAttr("Tbias")) {
OP_REQUIRES_OK(context, context->GetAttr("Tbias", &bias_dt));
}
}
if (IsFused(oneDNNFusedOps::kSum)) {
this->set_fuse_add(true);
}
const bool fuse_requantize = IsFused(oneDNNFusedOps::kRequantize);
OP_REQUIRES_OK(context, context->GetAttr("out_type", &out_dt));
if (fuse_requantize) {
OP_REQUIRES(
context, out_dt == DT_QINT8 || out_dt == DT_QUINT8,
absl::InvalidArgumentError("QuantizedConv: unsupported output "
"type when Requantize is fused."));
}
if (context->HasAttr("Tsummand")) {
OP_REQUIRES_OK(context, context->GetAttr("Tsummand", &summand_dt));
if (!this->get_fuse_add()) {
OP_REQUIRES(
context, summand_dt == out_dt,
absl::InvalidArgumentError(
"QuantizedConv: incorrect summand data type. When Sum is not "
"fused, Tsummand attribute must have same value as out_type."));
}
}
#ifndef ENABLE_ONEDNN_V3
int idx = fuse_requantize ? 1 : 0;
#else
post_op_to_idx_["src_scale"] = 0;
post_op_to_idx_["wei_scale"] = 1;
post_op_to_idx_["dst_scale"] = 2;
int idx = 3;
#endif
for (int i = 0; i < fused_ops_.size(); ++i) {
if (fused_ops_[i] == "Requantize") {
#ifndef ENABLE_ONEDNN_V3
post_op_to_idx_["output_scale"] = 0;
#endif
} else if (fused_ops_[i] == "Sum") {
post_op_to_idx_["sum"] = idx++;
} else if (fused_ops_[i] == "Relu") {
post_op_to_idx_["activation"] = idx++;
}
}
bool is_filter_const;
OP_REQUIRES_OK(context,
context->GetAttr("is_filter_const", &is_filter_const));
OP_REQUIRES(
context, is_filter_const,
absl::InvalidArgumentError("QuantizedConv: filter must be a constant"));
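    // Work out the input indices of the min/max range tensors.
    // num_fused_ops == -1 marks the newer _FusedQuantizedConv2D-style ops,
    // whose input layout depends on which post ops are present; non-negative
    // values correspond to the legacy ops with a fixed input layout.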
if (num_fused_ops == -1) {
int non_minmax_arg_idx_base = 2;
int minmax_arg_idx_base = 6;
int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;
int summand_idx_offset = this->get_fuse_add() ? 1 : 0;
int bias_min_max_idx_offset =
this->get_fuse_biasadd() &&
!(bias_dt == DT_FLOAT || bias_dt == DT_QINT32)
? 2
: 0;
min_input_idx_ =
non_minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;
max_input_idx_ = min_input_idx_ + 1;
min_filter_idx_ = min_input_idx_ + 2;
max_filter_idx_ = min_input_idx_ + 3;
if (this->get_fuse_biasadd()) {
min_bias_idx_ =
minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;
max_bias_idx_ = min_bias_idx_ + 1;
}
if (this->get_fuse_add()) {
this->set_input_add_idx(non_minmax_arg_idx_base + bias_idx_offset);
if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {
min_summand_idx_ = minmax_arg_idx_base + bias_idx_offset +
summand_idx_offset + bias_min_max_idx_offset;
max_summand_idx_ = min_summand_idx_ + 1;
}
}
if (fuse_requantize) {
min_freezed_output_idx_ = context->num_inputs() - 2;
max_freezed_output_idx_ = min_freezed_output_idx_ + 1;
}
} else {
int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;
min_input_idx_ = 2 + bias_idx_offset;
max_input_idx_ = 3 + bias_idx_offset;
min_filter_idx_ = 4 + bias_idx_offset;
max_filter_idx_ = 5 + bias_idx_offset;
if (fuse_requantize) {
min_freezed_output_idx_ = 6 + bias_idx_offset;
max_freezed_output_idx_ = 7 + bias_idx_offset;
}
if (this->get_fuse_add()) {
int input_add_idx = std::is_same<Toutput, quint8>::value
? context->num_inputs() - 1 - 2
: context->num_inputs() - 1;
this->set_input_add_idx(input_add_idx);
if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {
min_summand_idx_ = 9 + bias_idx_offset;
max_summand_idx_ = 10 + bias_idx_offset;
}
}
}
}
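  // Runs the base-class convolution, then produces the output min/max range
  // tensors: the frozen requantization range when the output is qint8/quint8,
  // otherwise a range derived from the input and filter ranges (per tensor or
  // per channel).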
void Compute(OpKernelContext* context) override {
MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32, false,
false, is_depthwise,
true>::Compute(context);
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_min->flat<float>()(0) =
context->input(min_freezed_output_idx_).template scalar<float>()();
output_max->flat<float>()(0) =
context->input(max_freezed_output_idx_).template scalar<float>()();
} else {
const Tensor& min_filter = context->input(min_filter_idx_);
const Tensor& max_filter = context->input(max_filter_idx_);
if (min_filter.dims() == 0) {
float min_output_value;
float max_output_value;
MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>(
min_input, max_input, min_filter.scalar<float>()(),
max_filter.scalar<float>()(), &min_output_value, &max_output_value);
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_min->flat<float>()(0) = min_output_value;
output_max->flat<float>()(0) = max_output_value;
} else {
size_t depth = min_filter.NumElements();
OP_REQUIRES_OK(context,
context->allocate_output(
1, {static_cast<ptrdiff_t>(depth)}, &output_min));
OP_REQUIRES_OK(context,
context->allocate_output(
2, {static_cast<ptrdiff_t>(depth)}, &output_max));
MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>(
min_input, max_input, min_filter, max_filter, &output_min,
&output_max);
}
}
}
protected:
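  // Appends the quantization-specific post-op parameters (scales, fused Sum,
  // and Relu) to the convolution primitive parameters built by the base class.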
void ExtendConvFwdParams(OpKernelContext* context,
MklConvFwdParams& params) override {
MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32, false,
false, is_depthwise,
true>::ExtendConvFwdParams(context, params);
params.post_op_params.resize(post_op_to_idx_.size());
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
OP_REQUIRES(
context,
((min_filter_vector.NumElements() > 0) &&
(max_filter_vector.NumElements() > 0) &&
(min_filter_vector.shape() == max_filter_vector.shape())),
        absl::InvalidArgumentError(
            "`min_filter` and `max_filter` must have the same shape and "
            "contain at least one element."));
size_t depth = min_filter_vector.NumElements();
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
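    // SCALE is a macro (defined and #undef'd elsewhere in this file): the
    // vector is named `scales` for oneDNN v2 builds and `wei_scale` when
    // ENABLE_ONEDNN_V3 is defined.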
std::vector<float> SCALE(depth);
float float_input_range =
std::max(std::abs(min_input), std::abs(max_input));
#ifdef ENABLE_ONEDNN_V3
float int_input_limit =
std::is_same<Tinput, quint8>::value ? 255.0f : 127.0f;
const float src_scale = float_input_range / int_input_limit;
#endif
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
const float min_freezed_output =
context->input(min_freezed_output_idx_).template scalar<float>()();
const float max_freezed_output =
context->input(max_freezed_output_idx_).template scalar<float>()();
float int_output_limit =
std::is_same<Toutput, quint8>::value ? 255.0f : 127.0f;
float float_output_range =
std::max(std::abs(min_freezed_output), std::abs(max_freezed_output));
#ifndef ENABLE_ONEDNN_V3
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
#endif
for (size_t i = 0; i < depth; ++i) {
float float_filter_range =
std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));
#ifndef ENABLE_ONEDNN_V3
scales[i] = int_output_limit * float_input_range * float_filter_range /
(int_const_scale_limit * float_output_range);
#else
wei_scale[i] = float_filter_range / 127.0;
#endif
}
#ifndef ENABLE_ONEDNN_V3
FactoryKeyCreator param_key;
param_key.AddAsKey<float>(min_input);
param_key.AddAsKey<float>(max_input);
param_key.AddAsKey<float>(min_freezed_output);
param_key.AddAsKey<float>(max_freezed_output);
param_key.AddAsKey<const float*>(min_filter);
param_key.AddAsKey<const float*>(max_filter);
params.post_op_params[post_op_to_idx_["output_scale"]] = {
"output_scale", dnnl::algorithm::undef, scales, param_key.GetKey()};
#else
const float dst_scale = float_output_range / int_output_limit;
FactoryKeyCreator dst_param_key;
dst_param_key.AddAsKey<float>(min_freezed_output);
dst_param_key.AddAsKey<float>(max_freezed_output);
params.post_op_params[post_op_to_idx_["dst_scale"]] = {
"dst_scale",
dnnl::algorithm::undef,
{dst_scale},
dst_param_key.GetKey()};
#endif
} else {
#ifdef ENABLE_ONEDNN_V3
if (!std::is_same<Toutput, qint32>::value)
TF_CHECK_OK(absl::FailedPreconditionError(
"Output datatype is expected to be qint32."));
float min_min_filter = min_filter[0];
float max_max_filter = max_filter[0];
for (size_t i = 0; i < depth; ++i) {
float float_filter_range =
std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));
wei_scale[i] = float_filter_range / 127.0;
if (min_filter[i] < min_min_filter) min_min_filter = min_filter[i];
if (max_filter[i] > max_max_filter) max_max_filter = max_filter[i];
}
const float single_wei_scale =
std::max(std::abs(min_min_filter), std::abs(max_max_filter)) / 127.0;
const float dst_scale = single_wei_scale * src_scale;
FactoryKeyCreator dst_param_key;
dst_param_key.AddAsKey<float>(dst_scale);
params.post_op_params[post_op_to_idx_["dst_scale"]] = {
"dst_scale",
dnnl::algorithm::undef,
{dst_scale},
dst_param_key.GetKey()};
#endif
}
#ifdef ENABLE_ONEDNN_V3
FactoryKeyCreator src_param_key;
src_param_key.AddAsKey<float>(min_input);
src_param_key.AddAsKey<float>(max_input);
FactoryKeyCreator wei_param_key;
wei_param_key.AddAsKey<const float*>(min_filter);
wei_param_key.AddAsKey<const float*>(max_filter);
params.post_op_params[post_op_to_idx_["src_scale"]] = {
"src_scale",
dnnl::algorithm::undef,
{src_scale},
src_param_key.GetKey()};
params.post_op_params[post_op_to_idx_["wei_scale"]] = {
"wei_scale", dnnl::algorithm::undef, wei_scale, wei_param_key.GetKey()};
#endif
if (this->get_fuse_add()) {
DataType summand_dt = this->input_type(this->get_input_add_idx());
if (std::is_same<Toutput, quint8>::value) {
bool summand_condition =
(summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);
DCHECK((summand_condition));
const Tensor& min_freezed_output_tensor =
context->input(min_freezed_output_idx_);
const Tensor& max_freezed_output_tensor =
context->input(max_freezed_output_idx_);
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(min_freezed_output_tensor.shape()),
absl::InvalidArgumentError(
absl::StrCat("`min_freezed_output` must be rank 0 but is rank ",
min_freezed_output_tensor.dims())));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(max_freezed_output_tensor.shape()),
absl::InvalidArgumentError(
absl::StrCat("`max_freezed_output` must be rank 0 but is rank ",
max_freezed_output_tensor.dims())));
const Tensor& min_freezed_summand_tensor =
context->input(min_summand_idx_);
const Tensor& max_freezed_summand_tensor =
context->input(max_summand_idx_);
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(min_freezed_summand_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_summand` must be rank 0 but is rank ",
min_freezed_summand_tensor.dims())));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(max_freezed_summand_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_summand` must be rank 0 but is rank ",
max_freezed_summand_tensor.dims())));
#ifndef ENABLE_ONEDNN_V3
const float min_freezed_output =
min_freezed_output_tensor.template scalar<float>()();
const float max_freezed_output =
max_freezed_output_tensor.template scalar<float>()();
float output_range = std::max(std::abs(min_freezed_output),
std::abs(max_freezed_output));
#endif
const float min_freezed_summand =
min_freezed_summand_tensor.template scalar<float>()();
const float max_freezed_summand =
max_freezed_summand_tensor.template scalar<float>()();
float summand_range = std::max(std::abs(min_freezed_summand),
std::abs(max_freezed_summand));
if (summand_dt == DT_QUINT8) {
params.post_op_params[post_op_to_idx_["sum"]] = {
"sum",
dnnl::algorithm::undef,
{SUMMAND_SCALE_U8(summand_range, output_range)},
""};
} else {
params.post_op_params[post_op_to_idx_["sum"]] = {
"sum",
dnnl::algorithm::undef,
{SUMMAND_SCALE_S8(summand_range, output_range)},
""};
}
} else {
params.post_op_params[post_op_to_idx_["sum"]] = {"sum",
dnnl::algorithm::undef,
{1.0},
"",
#ifdef ENABLE_ONEDNN_V3
summand_dt
#endif
};
}
}
if (IsFused(oneDNNFusedOps::kRelu)) {
params.post_op_params[post_op_to_idx_["activation"]] = {
"activation", dnnl::algorithm::eltwise_relu, {1.0, 0.0, 0.0}, ""};
}
}
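  // Allocates the output tensor. Without a fused Sum this defers to the base
  // class; with a fused Sum the summand input is either forwarded in place as
  // the output or reordered/bitcast into the destination buffer.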
void AllocateOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& output_dims_mkl_order,
MklTensorFormat output_tf_format,
MklDnnShape* output_mkl_shape,
Tensor** output_tensor) override {
if (!this->get_fuse_add()) {
MklConvOp<
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32,
false, false, is_depthwise,
true>::AllocateOutputTensor(context, conv_prim_desc,
output_dims_mkl_order,
output_tf_format,
output_mkl_shape,
output_tensor);
} else {
if (std::is_same<Toutput, quint8>::value) {
int summand_idx = this->get_input_add_idx();
DataType summand_dt = this->input_type(summand_idx);
bool summand_condition =
(summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);
DCHECK((summand_condition));
Tensor& summand = const_cast<Tensor&>(context->input(summand_idx));
if (summand_dt == DT_QINT8) {
OP_REQUIRES_OK(context, summand.BitcastFrom(summand, DT_QUINT8,
summand.shape()));
}
OP_REQUIRES(context,
context->forward_input_to_output_with_shape(
summand_idx, 0, summand.shape(), output_tensor),
absl::InvalidArgumentError(
"Summand cannot be forwarded in the current fusion."));
return;
}
#ifndef ENABLE_ONEDNN_V3
MklConvOp<
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32,
false, false, is_depthwise,
true>::AllocateOutputTensor(context, conv_prim_desc,
output_dims_mkl_order,
output_tf_format,
output_mkl_shape,
output_tensor);
const Tensor& summand = context->input(this->get_input_add_idx());
if (summand.dtype() != DT_FLOAT)
TF_CHECK_OK(absl::FailedPreconditionError(
"Current fusion requires summand to be float"));
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
std::vector<float> scales(depth);
for (size_t i = 0; i < depth; ++i) {
scales[i] =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
}
dnnl::primitive_attr reorder_attr;
#ifndef ENABLE_ONEDNN_V3
if (depth == 1) {
reorder_attr.set_output_scales(0, scales);
} else {
reorder_attr.set_output_scales(2, scales);
}
#else
DCHECK_EQ(depth, 1);
reorder_attr.set_scales_mask(DNNL_ARG_SRC, 0);
reorder_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);
reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);
#endif
auto summand_md = memory::desc(output_dims_mkl_order, MklDnnType<Tbias>(),
memory::format_tag::nhwc);
void* summand_buf =
static_cast<void*>(const_cast<Tbias*>(summand.flat<Tbias>().data()));
void* dst_buf =
static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data());
summand_.reset(new memory(summand_md, this->cpu_engine_, summand_buf));
dst_.reset(
new memory(conv_prim_desc.dst_desc(), this->cpu_engine_, dst_buf));
auto reorder_desc =
ReorderPd(this->cpu_engine_, summand_md, this->cpu_engine_,
conv_prim_desc.dst_desc(), reorder_attr);
CreateAndExecuteReorder(reorder_desc, *summand_, *dst_, this->cpu_engine_,
context);
#else
int summand_idx = this->get_input_add_idx();
DataType summand_dt = this->input_type(summand_idx);
if (summand_dt != DT_FLOAT)
TF_CHECK_OK(absl::FailedPreconditionError(
"Summand datatype is expected to be float."));
Tensor& summand_float = const_cast<Tensor&>(context->input(summand_idx));
OP_REQUIRES_OK(context,
summand_float.BitcastFrom(summand_float, DT_QINT32,
summand_float.shape()));
OP_REQUIRES(context,
context->forward_input_to_output_with_shape(
summand_idx, 0, summand_float.shape(), output_tensor),
absl::InvalidArgumentError(
"Summand cannot be forwarded in the current fusion."));
#endif
}
}
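  // Returns the bias data ready for the oneDNN primitive: passed through when
  // it is already in the expected type, otherwise scaled by the input/filter
  // quantization ranges via a reorder, and cached when the bias is constant.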
void* GetBiasHandle(OpKernelContext* context,
std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
const Tensor& bias_tensor) override {
if (!this->get_fuse_biasadd()) {
return nullptr;
}
#ifndef ENABLE_ONEDNN_V3
if (std::is_same<Tbias, qint32>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
bool scales_are_valid = (depth == scales_.size());
scales_.resize(depth);
for (size_t i = 0; i < depth; ++i) {
float tmp_scale =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {
scales_are_valid = false;
}
scales_[i] = tmp_scale;
}
if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {
dnnl::primitive_attr bias_attr;
#ifndef ENABLE_ONEDNN_V3
if (depth == 1) {
bias_attr.set_output_scales(0, scales_);
} else {
bias_attr.set_output_scales(1, scales_);
}
#else
DCHECK_EQ(depth, 1);
bias_attr.set_scales_mask(DNNL_ARG_SRC, 0);
bias_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);
bias_attr.set_scales_mask(DNNL_ARG_DST, 0);
#endif
auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(), memory::format_tag::x);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
if (!input_bias_) {
input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);
} else {
input_bias_->set_data_handle(bias_buf);
}
if (!scaled_bias_buf_)
AllocTmpBuffer<Tbias>(context, &scaled_bias_tensor_,
conv_fwd_pd->bias_desc(), &scaled_bias_buf_);
if (!scaled_bias_) {
scaled_bias_ = new memory(bias_md, this->cpu_engine_, scaled_bias_buf_);
} else {
scaled_bias_->set_data_handle(scaled_bias_buf_);
}
auto reorder_desc =
ReorderPd(this->cpu_engine_, input_bias_->get_desc(),
this->cpu_engine_, scaled_bias_->get_desc(), bias_attr);
CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,
this->cpu_engine_, context);
Tbias* bias_data =
reinterpret_cast<Tbias*>(scaled_bias_->get_data_handle());
if (is_bias_const_)
CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);
return bias_data;
}
return GetCachedBias(context);
#else
if (std::is_same<Tbias, float>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
if ((min_filter_vector.NumElements() == 0) ||
(max_filter_vector.NumElements() == 0) ||
(min_filter_vector.shape() != max_filter_vector.shape())) {
      TF_CHECK_OK(absl::FailedPreconditionError(
          "`min_filter` and `max_filter` must have the same shape and "
          "contain at least one element."));
}
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
bool scales_are_valid = (depth == scales_.size());
scales_.resize(depth);
for (size_t i = 0; i < depth; ++i) {
float tmp_scale =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {
scales_are_valid = false;
}
scales_[i] = tmp_scale;
}
if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {
dnnl::primitive_attr reorder_attr;
if (depth == 1) {
reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);
} else {
reorder_attr.set_scales_mask(DNNL_ARG_DST, 1);
}
auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(), memory::format_tag::x);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
if (!input_bias_) {
input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);
} else {
input_bias_->set_data_handle(bias_buf);
}
if (!scaled_bias_buf_) {
AllocTmpBuffer<float>(context, &scaled_bias_tensor_,
conv_fwd_pd->bias_desc(), &scaled_bias_buf_);
}
if (!scaled_bias_) {
scaled_bias_ = new memory(conv_fwd_pd->bias_desc(), this->cpu_engine_,
scaled_bias_buf_);
} else {
scaled_bias_->set_data_handle(scaled_bias_buf_);
}
std::unique_ptr<memory> scale_mem(
new memory({{static_cast<int64_t>(depth)},
MklDnnType<float>(),
memory::format_tag::x},
this->cpu_engine_, scales_.data()));
auto reorder_desc =
ReorderPd(this->cpu_engine_, input_bias_->get_desc(),
this->cpu_engine_, scaled_bias_->get_desc(), reorder_attr);
CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,
this->cpu_engine_, context, scale_mem.get());
float* bias_data =
reinterpret_cast<float*>(scaled_bias_->get_data_handle());
if (is_bias_const_)
CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);
return bias_data;
}
return GetCachedBias(context);
#endif
}
bool is_bias_const_;
Tensor cached_bias_data_ TF_GUARDED_BY(bias_cache_mu_);
memory* input_bias_ = nullptr;
memory* scaled_bias_ = nullptr;
Tensor scaled_bias_tensor_;
void* scaled_bias_buf_ = nullptr;
private:
std::vector<float> scales_;
mutex bias_cache_mu_;
std::vector<string> fused_ops_;
std::map<string, int> post_op_to_idx_;
int64_t fused_op_flags_ = 0;
std::unordered_map<string, oneDNNFusedOps> str_to_enum_{
{"BiasAdd", oneDNNFusedOps::kBias},
{"Sum", oneDNNFusedOps::kSum},
{"Relu", oneDNNFusedOps::kRelu},
{"Requantize", oneDNNFusedOps::kRequantize}};
std::shared_ptr<dnnl::memory> summand_;
std::shared_ptr<dnnl::memory> dst_;
int min_input_idx_ = -1;
int max_input_idx_ = -1;
int min_filter_idx_ = -1;
int max_filter_idx_ = -1;
int min_bias_idx_ = -1;
int max_bias_idx_ = -1;
int min_summand_idx_ = -1;
int max_summand_idx_ = -1;
int min_freezed_output_idx_ = -1;
int max_freezed_output_idx_ = -1;
inline bool IsFused(oneDNNFusedOps op) {
return fused_op_flags_ & (static_cast<int64_t>(op));
}
inline oneDNNFusedOps StrToEnum(const string op) {
    CHECK(str_to_enum_.find(op) != str_to_enum_.end())
        << "Error: Unknown post op: " << op;
return str_to_enum_[op];
}
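  // Bias-cache helpers: AllocateTensor reserves persistent storage for the
  // scaled bias, CacheBias fills it once when the bias is constant, and
  // GetCachedBias returns the cached copy on later invocations.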
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** bias_tensor) {
DCHECK(bias_tensor);
TensorShape bias_tf_shape;
bias_tf_shape.AddDim(
(conv_prim_desc.bias_desc().get_size() / sizeof(TSCALED_BIAS)));
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<TSCALED_BIAS>::value,
bias_tf_shape, &cached_bias_data_));
*bias_tensor = &cached_bias_data_;
}
inline bool IsBiasCacheEmpty(OpKernelContext* context)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
tf_shared_lock lock(bias_cache_mu_);
return (cached_bias_data_.NumElements() == 0);
}
void CacheBias(OpKernelContext* context,
const std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
TSCALED_BIAS* bias_data, const memory* scaled_bias)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
mutex_lock lock(bias_cache_mu_);
if (cached_bias_data_.NumElements() > 0) {
return;
}
Tensor* bias_tensor_ptr = nullptr;
AllocateTensor(context, *conv_fwd_pd, &bias_tensor_ptr);
void* cached_bias_data = const_cast<void*>(
static_cast<const void*>(bias_tensor_ptr->flat<TSCALED_BIAS>().data()));
size_t cached_bias_data_size = scaled_bias->get_desc().get_size();
memcpy(cached_bias_data, bias_data, cached_bias_data_size);
}
TSCALED_BIAS* GetCachedBias(OpKernelContext* context)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
tf_shared_lock lock(bias_cache_mu_);
const Tensor& cached_bias_data = cached_bias_data_;
return static_cast<TSCALED_BIAS*>(const_cast<TSCALED_BIAS*>(
cached_bias_data.flat<TSCALED_BIAS>().data()));
}
};
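// 3D convolution with fused post ops, analogous to MklFusedConvOp; the
// "padding_list" attribute carries explicit paddings when a Pad op has been
// folded into this kernel.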
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool native_format>
class MklFusedConv3DOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format> {
public:
explicit MklFusedConv3DOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format>(context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
std::vector<int> padding_list;
OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list));
if (padding_list.empty()) {
OP_REQUIRES(
context, !fused_ops.empty(),
absl::InvalidArgumentError("Fused Conv3D must have at least one "
"fused op when Pad is not fused."));
if (std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") ==
fused_ops.end()) {
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv3D must have one extra argument: bias."));
} else if (std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") ==
fused_ops.end() &&
std::find(fused_ops.begin(), fused_ops.end(), "Add") ==
fused_ops.end()) {
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv3D must have two extra arguments: bias and add."));
}
}
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops ==
std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else {
if (padding_list.empty()) {
OP_REQUIRES(context, false,
absl::UnimplementedError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
}
}
virtual ~MklFusedConv3DOp() {}
};
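// Kernel registration. The macros below stamp out REGISTER_KERNEL_BUILDER
// calls across the supported input, bias, output, and summand types; the NoOp
// registrations cover the plain (non-MKL-labeled) quantized op names.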
#define REGISTER_MKL_KERNEL(op, kernel, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
REGISTER_KERNEL_BUILDER( \
Name(op) \
.Device(DEVICE_CPU) \
.TypeConstraint<input_type>("Tinput") \
.TypeConstraint<qint8>("Tfilter") BIAS_TYPE_CONSTRAINT(bias_type) \
SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<output_type>("out_type") LABEL, \
kernel TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops));
#define REGISTER_MKL_KERNEL_ALL_INPUT_TYPES( \
op, kernel, bias_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops) \
REGISTER_MKL_KERNEL(op, kernel, qint8, bias_type, output_type, summand_type, \
is_depthwise, legacy_fused_ops, num_fused_ops); \
REGISTER_MKL_KERNEL(op, kernel, quint8, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops);
#define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( \
op, kernel, input_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops) \
REGISTER_MKL_KERNEL(op, kernel, input_type, qint32, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops); \
REGISTER_MKL_KERNEL(op, kernel, input_type, float, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops);
#define REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( \
op, kernel, output_type, summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, qint32, output_type, \
summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops); \
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, float, output_type, \
summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops);
#define LABEL
#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
summand_type, has_bias, is_depthwise, is_native)
#define BIAS_TYPE_CONSTRAINT(bias_type)
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL("QuantizedConv2D", NoOp, quint8, float, qint32, qint32,
false, false, false);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBias", NoOp, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBiasAndRelu", NoOp,
float, qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DWithBiasSumAndRelu", NoOp, quint8, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndRequantize", NoOp, quint8, float, qint8,
qint8, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DPerChannel", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndRelu", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndReluAndRequantize", NoOp, quint8, float,
quint8, quint8, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2D", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBias", NoOp, quint8, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBiasAndRelu", NoOp, quint8,
float, qint32, qint32, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"QuantizedConv2DWithBiasAndRequantize", NoOp, qint8, qint8, false, false,
false);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"QuantizedConv2DWithBiasAndReluAndRequantize", NoOp, quint8, quint8, false,
false, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", NoOp, quint8,
quint8, quint8, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#define SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<summand_type>("Tsummand")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedConv2DWithBiasSumAndReluAndRequantize", NoOp, quint8, quint8,
quint8, false, false, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", NoOp, quint8,
quint8, qint8, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
<CPUDevice, input_type, bias_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops>
#define BIAS_TYPE_CONSTRAINT(bias_type)
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2D", MklQuantizedConvOp,
float, qint32, qint32, false,
quantized_fusions::none, 0);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DPerChannel",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::none, 0);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBias",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::bias, 1);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBiasAndRelu",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::bias_relu, 2);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DWithBiasSumAndRelu", MklQuantizedConvOp,
quint8, float, qint32, qint32, false,
quantized_fusions::bias_sum_relu, 3);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRequantize", MklQuantizedConvOp,
quint8, float, qint8, qint8, false,
quantized_fusions::requantize, 1);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRelu", MklQuantizedConvOp, quint8,
float, qint32, qint32, false, quantized_fusions::relu, 1);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndReluAndRequantize",
MklQuantizedConvOp, quint8, float, quint8, quint8, false,
quantized_fusions::relu_requantize, 2);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2D", MklQuantizedConvOp, quint8,
float, qint32, qint32, true, quantized_fusions::none, 0);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBias", MklQuantizedConvOp,
quint8, float, qint32, qint32, true,
quantized_fusions::bias, 1);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBiasAndRelu",
MklQuantizedConvOp, quint8, float, qint32, qint32, true,
quantized_fusions::bias_relu, 2);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasAndRequantize", MklQuantizedConvOp, qint8,
qint8, false, quantized_fusions::bias_requantize, 2);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasAndReluAndRequantize", MklQuantizedConvOp,
quint8, quint8, false, quantized_fusions::bias_relu_requantize, 3);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
MklQuantizedConvOp, quint8, quint8, quint8, true,
quantized_fusions::bias_relu_requantize, 3);
#undef LABEL
#define LABEL
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint32, qint32,
false, quantized_fusions::none, -1)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint32, qint32,
true, quantized_fusions::none, -1)
#undef LABEL
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
#undef SUMMAND_TYPE_CONSTRAINT
#define SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<summand_type>("Tsummand")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasSumAndReluAndRequantize", MklQuantizedConvOp,
quint8, quint8, quint8, false, quantized_fusions::bias_sum_relu_requantize,
4);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
MklQuantizedConvOp, quint8, quint8, qint8, false,
quantized_fusions::bias_sum_relu_requantize, 4);
#undef LABEL
#define LABEL
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint8, qint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, quint8, qint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, quint8, quint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint8, quint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint8, qint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, quint8, qint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, quint8, quint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint8, quint8,
true, quantized_fusions::none, -1);
#undef LABEL
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#define REGISTER_NO_OP_CPU_2D_DEPTHWISE(T) \
REGISTER_KERNEL_BUILDER(Name("_FusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
NoOp);
TF_CALL_float(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
TF_CALL_bfloat16(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
TF_CALL_half(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
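// Registrations for the float/bfloat16/half Conv2D variants: the
// MKL-layout-dependent kernels, their _MklNative* counterparts, and the
// Pad-fused and dummy placeholder kernels.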
#define REGISTER_MKL_CPU_2D(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D);
TF_CALL_half(REGISTER_MKL_CPU_2D);
#define REGISTER_MKL_CPU_2D_DEPTHWISE(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \
true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \
true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D_DEPTHWISE);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_DEPTHWISE);
TF_CALL_half(REGISTER_MKL_CPU_2D_DEPTHWISE);
#define REGISTER_MKL_CPU_2D_FUSED(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<int32>("Tpaddings") \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<int32>("Tpaddings") \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D_FUSED);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_FUSED);
TF_CALL_half(REGISTER_MKL_CPU_2D_FUSED);
#define REGISTER_MKL_CPU_3D(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConv3DOp<CPUDevice, T, T, T, T, T, int32, false, true>);
TF_CALL_float(REGISTER_MKL_CPU_3D);
TF_CALL_bfloat16(REGISTER_MKL_CPU_3D);
TF_CALL_half(REGISTER_MKL_CPU_3D);
#undef APPEND_DEPTHWISE
#undef APPEND_ELTWISE
#undef GET_DATA_TYPE
#undef SET_FUSE_ACTIVATION_FOR_RELU6
#undef SET_MKL_LAYOUT
#undef OUTPUT_SCALE_DCHECK
#undef TSCALED_BIAS
#undef SCALE
#undef SUMMAND_SCALE_U8
#undef SUMMAND_SCALE_S8
}
#endif | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
struct Conv2DDimensions {
Conv2DDimensions(int n, int h, int w, int c, int fc, int fh, int fw)
: input_batches(n),
input_height(h),
input_width(w),
input_depth(c),
filter_count(fc),
filter_height(fh),
filter_width(fw) {}
int input_batches;
int input_height;
int input_width;
int input_depth;
int filter_count;
int filter_height;
int filter_width;
};
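// Helpers that build random input/filter/output tensors (and the int32 shape
// tensors needed by the backprop ops) for the benchmark graphs below.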
static Tensor GetRandomTensor(const TensorShape& shape) {
Tensor tensor(DT_FLOAT, TensorShape(shape));
tensor.flat<float>() = tensor.flat<float>().setRandom();
return tensor;
}
static Tensor GetRandomInputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetRandomFilterTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
static Tensor GetRandomOutputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.filter_count});
}
static Tensor GetInputSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetFilterSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
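// Graph builders: each Default*/Mkl* pair constructs the same convolution or
// backprop computation with the stock TensorFlow op and with its _Mkl*
// counterpart so the benchmarks can be compared directly.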
static Graph* DefaultConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d"), "Conv2D")
.Input(input)
.Input(filter)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d));
return graph;
}
static Graph* MklConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("mkl_conv_2d"), "_MklConv2D")
.Input(input)
.Input(filter)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d));
return graph;
}
static Graph* DefaultConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_input;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_input"), "Conv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* MklConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_input;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_input"),
"_MklConv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* DefaultConv2DBwdFilter(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_filter;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_filter"), "Conv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
static Graph* MklConv2DBwdFilter(const Conv2DDimensions& dims) {
Graph* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_filter;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_filter"),
"_MklConv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
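// Benchmark plumbing: BM_Conv2DT and friends register one benchmark per
// (kind, shape, type) combination and report items processed as FLOPs.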
#define BM_CONCAT(a, b) a##b
#define BM_NAME(p, type, N, H, W, C, FC, FH, FW) \
BM_CONCAT(BM_##p##_##type##_in_##N##_##H##_##W##_##C, _f_##FC##_##FH##_##FW)
#define BM_Conv2DT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (FC); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2D)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2D(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdInputT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (C); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdInput)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdInput(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdInputT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdInputT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdFilterT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (FH) * (FW) * (C) * (FC); \
int64 flops_per_iter = num_computed_elements * ((N) * (H) * (W)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdFilter)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdFilter(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdFilterT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdFilterT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2D(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2D(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2D(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2D(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2D(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2D(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdInput(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdInput(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdFilter(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdFilter(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca410313-e701-44a2-b7c7-7d99c9f9e40f | cpp | tensorflow/tensorflow | mkl_swish_op | tensorflow/core/kernels/mkl/mkl_swish_op.cc | tensorflow/core/kernels/mkl/mkl_swish_op_test.cc | #ifdef INTEL_MKL
#include "tensorflow/core/kernels/mkl/mkl_eltwise_activation_base_op.h"
namespace tensorflow {
template <typename Device, typename T>
class MklSwishOp
: public MklEltwiseFwdActivationOpBase<Device, T,
dnnl::algorithm::eltwise_swish> {
public:
~MklSwishOp() {}
explicit MklSwishOp(OpKernelConstruction* context)
: MklEltwiseFwdActivationOpBase<Device, T,
dnnl::algorithm::eltwise_swish>(
context, 1.0f, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const Tensor& src_tensor = context->input(0);
TensorShape src_shape = src_tensor.shape();
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
TensorShape dst_shape = src_shape;
OP_REQUIRES_OK(context, context->allocate_output(
GetTensorDataIndex(0, context->num_outputs()),
dst_shape, &dst_tensor));
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
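    // Scalar fallback: swish(x) = x * sigmoid(x) = x / (1 + exp(-x)).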
T feature = (static_cast<T*>(user_i))[0];
T e1 = Eigen::numext::exp(-feature);
(static_cast<T*>(out_o))[0] = feature / (static_cast<T>(1) + e1);
return;
}
};
#define REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklSwish").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
MklSwishOp<CPUDevice, type>);
TF_CALL_float(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_half(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
}
#endif | #ifdef INTEL_MKL
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/mkl/mkl_eltwise_activation_base_op.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
template <typename T>
static Graph* SwishGraph(const string& kind, const TensorShape& shape) {
auto* graph = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::v();
Tensor input_t(dtype, shape);
input_t.flat<T>().setRandom();
Node* input = test::graph::Constant(graph, input_t, "input");
const bool isDefault = (kind == "Default");
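  // "Default" builds the unfused Sigmoid followed by Mul; any other kind builds
  // the single fused _MklSwish node.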
Node* sigmoid;
Node* mul;
Node* swish;
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName("Default_sigmoid"), "Sigmoid")
.Input(input)
.Attr("T", dtype)
.Finalize(graph, &sigmoid));
TF_CHECK_OK(NodeBuilder(graph->NewName("Default_mul"), "Mul")
.Input(input)
.Input(sigmoid)
.Attr("T", dtype)
.Finalize(graph, &mul));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName("Mkl_swish"), "_MklSwish")
.Input(input)
.Attr("T", dtype)
.Finalize(graph, &swish));
return graph;
}
#define BM_SWISH(kind, A, B, C, D, type, T) \
static void BM_SWISH_##kind##_##type##_##A##_##B##_##C##_##D##_##T( \
::testing::benchmark::State& state) { \
int64 num_computed_elements = (A) * (B) * (C) * (D); \
int64 flops_per_iter = num_computed_elements; \
\
test::Benchmark(#type, SwishGraph<T>(#kind, {A, B, C, D})).Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_SWISH_##kind##_##type##_##A##_##B##_##C##_##D##_##T)
#define BENCHMARK_SWISH(A, B, C, D, type, T) \
BM_SWISH(Default, A, B, C, D, type, T); \
BM_SWISH(Mkl, A, B, C, D, type, T);
#define BENCHMARK_DTYPE(T) \
BENCHMARK_SWISH(1, 16, 16, 3, cpu, T); \
BENCHMARK_SWISH(16, 32, 32, 1, cpu, T); \
BENCHMARK_SWISH(16, 64, 64, 128, cpu, T); \
BENCHMARK_SWISH(32, 64, 64, 128, cpu, T); \
BENCHMARK_SWISH(32, 256, 256, 128, cpu, T); \
BENCHMARK_SWISH(32, 512, 512, 128, cpu, T);
BENCHMARK_DTYPE(float)
BENCHMARK_DTYPE(bfloat16)
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_swish_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_swish_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b03d42a1-41b2-4114-b009-33cfe164365f | cpp | tensorflow/tensorflow | mkl_relu_op | tensorflow/core/kernels/mkl/mkl_relu_op.cc | tensorflow/core/kernels/mkl/mkl_relu_op_test.cc | #if defined(INTEL_MKL) && !defined(ENABLE_ONEDNN_V3)
#include <unordered_map>
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::algorithm;
using dnnl::eltwise_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
using EltwiseFwdPd = dnnl::eltwise_forward::primitive_desc;
using EltwiseBwdPd = dnnl::eltwise_backward::primitive_desc;
namespace tensorflow {
template <typename T>
class MklEltwiseFwdParams {
public:
memory::dims src_dims;
memory::desc src_md;
algorithm alg_kind;
float alpha;
float beta;
MklEltwiseFwdParams(memory::dims src_dims, memory::desc src_md,
algorithm alg_kind, float alpha, float beta)
: src_dims(src_dims),
src_md(src_md),
alg_kind(alg_kind),
alpha(alpha),
beta(beta) {}
};
template <typename T>
class MklEltwiseFwdPrimitive : public MklPrimitive {
public:
explicit MklEltwiseFwdPrimitive(const MklEltwiseFwdParams<T>& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.eltwise_fwd == nullptr) {
Setup(fwdParams);
}
}
~MklEltwiseFwdPrimitive() {}
void Execute(const T* src_data, T* dst_data,
std::shared_ptr<stream> fwd_stream) {
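    // Point the cached primitive's memory objects at the caller's buffers,
    // execute, then reset the handles so the primitive can be reused safely.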
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#ifndef ENABLE_ONEDNN_OPENMP
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *fwd_stream);
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data),
*fwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data));
#endif
DCHECK_EQ(context_.fwd_primitives.size(),
context_.fwd_primitives_args.size());
execute_primitives(context_.fwd_primitives, fwd_stream,
context_.fwd_primitives_args);
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
}
std::shared_ptr<EltwiseFwdPd> GetEltwiseFwdPd() { return context_.fwd_pd; }
private:
struct EltwiseFwdContext {
std::shared_ptr<memory> src_mem;
std::shared_ptr<memory> dst_mem;
std::shared_ptr<dnnl::eltwise_forward::desc> fwd_desc;
std::shared_ptr<EltwiseFwdPd> fwd_pd;
std::shared_ptr<memory::desc> src_md;
std::shared_ptr<memory::desc> dst_md;
std::shared_ptr<dnnl::primitive> eltwise_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> fwd_primitives_args;
EltwiseFwdContext()
: src_mem(nullptr),
dst_mem(nullptr),
fwd_desc(nullptr),
fwd_pd(nullptr),
src_md(nullptr),
dst_md(nullptr),
eltwise_fwd(nullptr) {}
};
void Setup(const MklEltwiseFwdParams<T>& fwdParams) {
context_.src_md.reset(new memory::desc(fwdParams.src_md.data));
context_.fwd_desc.reset(new eltwise_forward::desc(
prop_kind::forward, fwdParams.alg_kind, *context_.src_md,
fwdParams.alpha, fwdParams.beta));
context_.fwd_pd.reset(new EltwiseFwdPd(*context_.fwd_desc, cpu_engine_));
auto fwd_pd = context_.fwd_pd.get();
context_.src_mem.reset(
new memory(fwd_pd->src_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(fwd_pd->dst_desc(), cpu_engine_, DummyData));
context_.eltwise_fwd.reset(new eltwise_forward(*context_.fwd_pd));
context_.fwd_primitives_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem}, {DNNL_ARG_DST, *context_.dst_mem}});
context_.fwd_primitives.push_back(*context_.eltwise_fwd);
}
struct EltwiseFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T>
class MklEltwiseFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklEltwiseFwdPrimitive<T>* Get(
const MklEltwiseFwdParams<T>& fwdParams) {
MklEltwiseFwdPrimitive<T>* eltwise_forward = nullptr;
eltwise_forward = static_cast<MklEltwiseFwdPrimitive<T>*>(
MklEltwiseFwdPrimitiveFactory<T>::GetInstance().GetEltwiseFwd(
fwdParams));
if (eltwise_forward == nullptr) {
eltwise_forward = new MklEltwiseFwdPrimitive<T>(fwdParams);
MklEltwiseFwdPrimitiveFactory<T>::GetInstance().SetEltwiseFwd(
fwdParams, eltwise_forward);
}
return eltwise_forward;
}
static MklEltwiseFwdPrimitiveFactory& GetInstance() {
static MklEltwiseFwdPrimitiveFactory instance_;
return instance_;
}
private:
MklEltwiseFwdPrimitiveFactory() {}
~MklEltwiseFwdPrimitiveFactory() {}
static string CreateKey(const MklEltwiseFwdParams<T>& fwdParams) {
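    // The cache key encodes dims, algorithm kind and alpha/beta, so primitives
    // are only shared between identically parameterized eltwise ops.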
string prefix = "eltwise_fwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(fwdParams.src_dims);
key_creator.AddAsKey<int>(static_cast<int>(fwdParams.alg_kind));
key_creator.AddAsKey<float>(static_cast<float>(fwdParams.alpha));
key_creator.AddAsKey<float>(static_cast<float>(fwdParams.beta));
return key_creator.GetKey();
}
MklPrimitive* GetEltwiseFwd(const MklEltwiseFwdParams<T>& fwdParams) {
string key = CreateKey(fwdParams);
return this->GetOp(key);
}
void SetEltwiseFwd(const MklEltwiseFwdParams<T>& fwdParams,
MklPrimitive* op) {
string key = CreateKey(fwdParams);
this->SetOp(key, op);
}
};
template <typename T>
class MklEltwiseBwdParams {
public:
memory::dims src_dims;
memory::desc common_md;
algorithm alg_kind;
float alpha;
float beta;
int forward_input_type;
MklEltwiseBwdParams(const memory::dims& src_dims,
const memory::desc& common_md, algorithm alg_kind,
float alpha, float beta, int forward_input_type = -1)
: src_dims(src_dims),
common_md(common_md),
alg_kind(alg_kind),
alpha(alpha),
beta(beta),
forward_input_type(forward_input_type) {}
};
template <typename T>
class MklEltwiseBwdPrimitive : public MklPrimitive {
public:
explicit MklEltwiseBwdPrimitive(const MklEltwiseBwdParams<T>& bwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.eltwise_bwd == nullptr) {
Setup(bwdParams);
}
}
~MklEltwiseBwdPrimitive() {}
void Execute(const T* src_data, const T* diff_dst_data, T* diff_src_data,
std::shared_ptr<stream> bwd_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#ifndef ENABLE_ONEDNN_OPENMP
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *bwd_stream);
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)), *bwd_stream);
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data),
*bwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)));
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data));
#endif
DCHECK_EQ(context_.bwd_primitives.size(),
context_.bwd_primitives_args.size());
execute_primitives(context_.bwd_primitives, bwd_stream,
context_.bwd_primitives_args);
context_.src_mem->set_data_handle(DummyData);
context_.diff_dst_mem->set_data_handle(DummyData);
context_.diff_src_mem->set_data_handle(DummyData);
}
std::shared_ptr<EltwiseBwdPd> GetEltwiseBwdPd() { return context_.bwd_pd; }
private:
struct EltwiseBwdContext {
std::shared_ptr<memory> src_mem;
std::shared_ptr<memory> diff_dst_mem;
std::shared_ptr<memory> diff_src_mem;
std::shared_ptr<dnnl::eltwise_backward::desc> bwd_desc;
std::shared_ptr<memory::desc> src_md;
std::shared_ptr<memory::desc> diff_dst_md;
std::shared_ptr<memory::desc> common_md;
std::shared_ptr<dnnl::eltwise_forward::desc> fwd_desc;
std::shared_ptr<EltwiseFwdPd> fwd_pd;
std::shared_ptr<EltwiseBwdPd> bwd_pd;
std::shared_ptr<dnnl::primitive> eltwise_bwd;
std::vector<dnnl::primitive> bwd_primitives;
std::vector<MemoryArgsMap> bwd_primitives_args;
EltwiseBwdContext()
: src_mem(nullptr),
diff_dst_mem(nullptr),
diff_src_mem(nullptr),
src_md(nullptr),
diff_dst_md(nullptr),
common_md(nullptr),
fwd_desc(nullptr),
fwd_pd(nullptr),
bwd_pd(nullptr),
eltwise_bwd(nullptr) {}
};
void Setup(const MklEltwiseBwdParams<T>& bwdParams) {
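    // The backward descriptor needs a forward primitive descriptor as a hint,
    // so a forward_training descriptor is created first.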
context_.src_md.reset(new memory::desc(bwdParams.common_md.data));
context_.diff_dst_md.reset(new memory::desc(bwdParams.common_md.data));
context_.fwd_desc.reset(new dnnl::eltwise_forward::desc(
prop_kind::forward_training, bwdParams.alg_kind, *context_.src_md,
bwdParams.alpha, bwdParams.beta));
context_.fwd_pd.reset(new EltwiseFwdPd(*context_.fwd_desc, cpu_engine_));
context_.bwd_desc.reset(new dnnl::eltwise_backward::desc(
bwdParams.alg_kind, *context_.diff_dst_md, *context_.src_md,
bwdParams.alpha, bwdParams.beta));
context_.bwd_pd.reset(
new EltwiseBwdPd(*context_.bwd_desc, cpu_engine_, *context_.fwd_pd));
auto bwd_pd = context_.bwd_pd.get();
context_.src_mem.reset(
new memory(bwd_pd->src_desc(), cpu_engine_, DummyData));
context_.diff_dst_mem.reset(
new memory(bwd_pd->diff_dst_desc(), cpu_engine_, DummyData));
context_.diff_src_mem.reset(
new memory(bwd_pd->diff_src_desc(), cpu_engine_, DummyData));
context_.eltwise_bwd.reset(new dnnl::eltwise_backward(*context_.bwd_pd));
context_.bwd_primitives_args.push_back(
{{bwdParams.forward_input_type, *context_.src_mem},
{DNNL_ARG_DIFF_DST, *context_.diff_dst_mem},
{DNNL_ARG_DIFF_SRC, *context_.diff_src_mem}});
context_.bwd_primitives.push_back(*context_.eltwise_bwd);
}
struct EltwiseBwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T>
class MklEltwiseBwdPrimitiveFactory : public MklPrimitiveFactory<T> {
private:
MklEltwiseBwdPrimitiveFactory() {}
~MklEltwiseBwdPrimitiveFactory() {}
public:
static MklEltwiseBwdPrimitive<T>* Get(
const MklEltwiseBwdParams<T>& bwdParams) {
MklEltwiseBwdPrimitive<T>* eltwise_backward = nullptr;
eltwise_backward = static_cast<MklEltwiseBwdPrimitive<T>*>(
MklEltwiseBwdPrimitiveFactory<T>::GetInstance().GetEltwiseBwd(
bwdParams));
if (eltwise_backward == nullptr) {
eltwise_backward = new MklEltwiseBwdPrimitive<T>(bwdParams);
MklEltwiseBwdPrimitiveFactory<T>::GetInstance().SetEltwiseBwd(
bwdParams, eltwise_backward);
}
return eltwise_backward;
}
static MklEltwiseBwdPrimitiveFactory& GetInstance() {
static MklEltwiseBwdPrimitiveFactory instance_;
return instance_;
}
private:
static string CreateKey(const MklEltwiseBwdParams<T>& bwdParams) {
string prefix = "eltwise_bwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(bwdParams.src_dims);
key_creator.AddAsKey(static_cast<int>(bwdParams.alg_kind));
key_creator.AddAsKey(static_cast<float>(bwdParams.alpha));
key_creator.AddAsKey(static_cast<float>(bwdParams.beta));
return key_creator.GetKey();
}
MklPrimitive* GetEltwiseBwd(const MklEltwiseBwdParams<T>& bwdParams) {
string key = CreateKey(bwdParams);
return this->GetOp(key);
}
void SetEltwiseBwd(const MklEltwiseBwdParams<T>& bwdParams,
MklPrimitive* op) {
string key = CreateKey(bwdParams);
this->SetOp(key, op);
}
};
typedef Eigen::ThreadPoolDevice CPUDevice;
template <typename Device, typename T, algorithm alg_kind>
class MklReluOpBase : public OpKernel {
public:
~MklReluOpBase() {}
explicit MklReluOpBase(OpKernelConstruction* context, float alpha, float beta)
: OpKernel(context), alpha_(alpha), beta_(beta) {}
virtual void Compute_Scalar(OpKernelContext* context) = 0;
void Compute(OpKernelContext* context) override {
try {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
if (src_tensor.dims() == 0) {
Compute_Scalar(context);
return;
}
MklDnnShape dnn_shape_dst;
TensorShape tf_shape_dst;
Tensor* dst_tensor = nullptr;
if (src_tensor.shape().num_elements() == 0) {
dnn_shape_dst.SetMklTensor(false);
tf_shape_dst = MklGetInput(context, src_index).shape();
AllocateOutputSetMklShape(context, dst_index, &dst_tensor, tf_shape_dst,
dnn_shape_dst);
return;
}
MklDnnData<T> src(&cpu_engine);
memory::dims src_dims;
memory::desc src_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (dnn_shape_src.IsMklTensor()) {
src_md = dnn_shape_src.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
} else {
src_dims = TFShapeToMklDnnDims(src_tensor.shape());
auto src_strides = CalculateTFStrides(src_dims);
src_md = MklDnnData<T>::CreateBlockedMemDesc(src_dims, src_strides);
}
MklEltwiseFwdParams<T> fwdParams(src_dims, src_md, alg_kind, alpha_,
beta_);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklEltwiseFwdPrimitive<T>* eltwise_fwd =
MklEltwiseFwdPrimitiveFactory<T>::Get(fwdParams);
auto eltwise_fwd_pd = eltwise_fwd->GetEltwiseFwdPd();
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, eltwise_fwd->GetEngine()));
bool is_src_reordered = false;
const T* src_data = src_tensor.flat<T>().data();
if (src_md != eltwise_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(eltwise_fwd_pd->src_desc(), cpu_engine,
context);
src_data = const_cast<T*>(
reinterpret_cast<T*>(src.GetOpMem().get_data_handle()));
is_src_reordered = true;
}
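      // If the input was reordered (or arrived in an MKL layout), emit the
      // output in the primitive's blocked layout; otherwise keep the plain TF
      // layout and try to reuse the input buffer in place.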
if (is_src_reordered || dnn_shape_src.IsMklTensor()) {
dnn_shape_dst.SetMklTensor(true);
auto dst_pd = eltwise_fwd_pd->dst_desc();
dnn_shape_dst.SetMklLayout(&dst_pd);
dnn_shape_dst.SetElemType(MklDnnType<T>());
if (dnn_shape_src.IsMklTensor()) {
dnn_shape_dst.SetTfLayout(dnn_shape_src.GetDimension(),
dnn_shape_src.GetSizesAsMklDnnDims(),
dnn_shape_src.GetTfDataFormat());
} else {
dnn_shape_dst.SetTfLayout(src_tensor.dims(),
TFShapeToMklDnnDims(src_tensor.shape()),
MklTensorFormat::FORMAT_BLOCKED);
}
tf_shape_dst.AddDim(dst_pd.get_size() / sizeof(T));
} else {
dnn_shape_dst.SetMklTensor(false);
tf_shape_dst = src_tensor.shape();
}
if (is_src_reordered) {
AllocateOutputSetMklShape(context, dst_index, &dst_tensor, tf_shape_dst,
dnn_shape_dst);
} else {
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{static_cast<const int>(src_index)},
static_cast<const int>(dst_index),
tf_shape_dst, &dst_tensor));
AllocateOutputSetMklShape(context, dst_index, dnn_shape_dst);
}
T* dst_data = dst_tensor->flat<T>().data();
eltwise_fwd->Execute(src_data, dst_data, fwd_cpu_stream);
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
context,
errors::Aborted("Operation received an exception:", error_msg));
}
}
private:
engine cpu_engine = engine(engine::kind::cpu, 0);
std::shared_ptr<EltwiseFwdPd> relu_fwd_pd;
protected:
float alpha_;
float beta_;
};
template <typename Device, typename T, algorithm alg_kind>
class MklReluGradOpBase : public OpKernel {
public:
~MklReluGradOpBase() {}
explicit MklReluGradOpBase(OpKernelConstruction* context, float alpha,
float beta)
: OpKernel(context), alpha_(alpha), beta_(beta) {}
virtual void Compute_Scalar(OpKernelContext* context) = 0;
virtual int GetDiffDstIndex() const { return 0; }
virtual int GetSrcIndex() const { return 1; }
virtual int GetDiffSrcIndex() const { return 0; }
virtual int GetTypeOfInputTensorFromFwdOp() const { return DNNL_ARG_SRC; }
void Compute(OpKernelContext* context) {
try {
MklDnnData<T> src(&cpu_engine);
MklDnnData<T> diff_dst(&cpu_engine);
size_t diff_dst_index = GetDiffDstIndex();
size_t src_index = GetSrcIndex();
const size_t diff_src_index = GetDiffSrcIndex();
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_src, dnn_shape_diff_dst;
GetMklShape(context, src_index, &dnn_shape_src);
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
int src_dims_size = src_tensor.dims();
if (src_dims_size == 0) {
Compute_Scalar(context);
return;
}
TensorShape tf_shape_diff_src;
MklDnnShape dnn_shape_diff_src;
if (src_tensor.shape().num_elements() == 0) {
dnn_shape_diff_src.SetMklTensor(false);
tf_shape_diff_src = MklGetInput(context, diff_src_index).shape();
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
tf_shape_diff_src, dnn_shape_diff_src);
return;
}
memory::dims src_dims = {};
memory::desc src_md({}, memory::data_type::undef,
memory::format_tag::undef);
memory::desc diff_dst_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (!dnn_shape_src.IsMklTensor() && !dnn_shape_diff_dst.IsMklTensor()) {
src_dims = TFShapeToMklDnnDims(src_tensor.shape());
auto src_strides = CalculateTFStrides(src_dims);
src_md = MklDnnData<T>::CreateBlockedMemDesc(src_dims, src_strides);
diff_dst_md = src_md;
} else if (dnn_shape_src.IsMklTensor() &&
!dnn_shape_diff_dst.IsMklTensor()) {
src_md = dnn_shape_src.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
MklTensorFormat src_mkl_data_format = dnn_shape_src.GetTfDataFormat();
auto src_tf_data_format =
MklDnnDataFormatToTFDataFormat(src_mkl_data_format);
auto diff_dst_dims = TFShapeToMklDnnDimsInNCHW(diff_dst_tensor.shape(),
src_tf_data_format);
diff_dst_md = memory::desc(
diff_dst_dims, MklDnnType<T>(),
MklTensorFormatToMklDnnDataFormat(src_mkl_data_format));
} else if (!dnn_shape_src.IsMklTensor() &&
dnn_shape_diff_dst.IsMklTensor()) {
diff_dst_md = dnn_shape_diff_dst.GetMklLayout();
MklTensorFormat diff_dst_mkl_data_format =
dnn_shape_diff_dst.GetTfDataFormat();
auto diff_dst_tf_data_format =
MklDnnDataFormatToTFDataFormat(diff_dst_mkl_data_format);
src_dims = (src_tensor.dims() == 4)
? TFShapeToMklDnnDimsInNCHW(src_tensor.shape(),
diff_dst_tf_data_format)
: TFShapeToMklDnnDimsInNCDHW(src_tensor.shape(),
diff_dst_tf_data_format);
src_md = memory::desc(
src_dims, MklDnnType<T>(),
MklTensorFormatToMklDnnDataFormat(diff_dst_mkl_data_format));
} else {
src_md = dnn_shape_src.GetMklLayout();
diff_dst_md = dnn_shape_diff_dst.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
}
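      // The eltwise backward primitive requires src and diff_dst to share one
      // memory descriptor; prefer whichever side carries an MKL layout.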
memory::desc common_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (dnn_shape_src.IsMklTensor() || dnn_shape_diff_dst.IsMklTensor()) {
common_md = dnn_shape_src.IsMklTensor() ? src_md : diff_dst_md;
} else {
common_md = src_md;
}
MklEltwiseBwdParams<T> bwdParams(src_dims, common_md, alg_kind, alpha_,
beta_, GetTypeOfInputTensorFromFwdOp());
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklEltwiseBwdPrimitive<T>* eltwise_bwd =
MklEltwiseBwdPrimitiveFactory<T>::Get(bwdParams);
auto eltwise_bwd_pd = eltwise_bwd->GetEltwiseBwdPd();
std::shared_ptr<stream> bwd_cpu_stream;
bwd_cpu_stream.reset(CreateStream(&eigen_tp, eltwise_bwd->GetEngine()));
const T* src_data = src_tensor.flat<T>().data();
if (src_md != eltwise_bwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(eltwise_bwd_pd.get()->diff_src_desc(),
cpu_engine, context);
src_data = const_cast<T*>(
reinterpret_cast<T*>(src.GetOpMem().get_data_handle()));
}
const T* diff_dst_data = diff_dst_tensor.flat<T>().data();
if (diff_dst_md != eltwise_bwd_pd->diff_dst_desc()) {
diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
diff_dst.CheckReorderToOpMem(eltwise_bwd_pd.get()->diff_src_desc(),
cpu_engine, context);
diff_dst_data = const_cast<T*>(
reinterpret_cast<T*>(diff_dst.GetOpMem().get_data_handle()));
}
if (dnn_shape_src.IsMklTensor() || dnn_shape_diff_dst.IsMklTensor()) {
auto diff_src_pd = eltwise_bwd_pd->diff_src_desc();
dnn_shape_diff_src.SetMklTensor(true);
dnn_shape_diff_src.SetMklLayout(&diff_src_pd);
dnn_shape_diff_src.SetElemType(MklDnnType<T>());
if (dnn_shape_src.IsMklTensor()) {
dnn_shape_diff_src.SetTfLayout(dnn_shape_src.GetDimension(),
dnn_shape_src.GetSizesAsMklDnnDims(),
dnn_shape_src.GetTfDataFormat());
} else {
dnn_shape_diff_src.SetTfLayout(
dnn_shape_diff_dst.GetDimension(),
dnn_shape_diff_dst.GetSizesAsMklDnnDims(),
dnn_shape_diff_dst.GetTfDataFormat());
}
tf_shape_diff_src.AddDim(diff_src_pd.get_size() / sizeof(T));
} else {
dnn_shape_diff_src.SetMklTensor(false);
tf_shape_diff_src = src_tensor.shape();
}
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{static_cast<const int>(diff_dst_index)},
static_cast<const int>(diff_src_index),
tf_shape_diff_src, &diff_src_tensor));
AllocateOutputSetMklShape(context, diff_src_index, dnn_shape_diff_src);
T* diff_src_data = diff_src_tensor->flat<T>().data();
eltwise_bwd->Execute(src_data, diff_dst_data, diff_src_data,
bwd_cpu_stream);
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
context,
errors::Aborted("Operation received an exception:", error_msg));
}
}
private:
engine cpu_engine = engine(engine::kind::cpu, 0);
std::shared_ptr<EltwiseFwdPd> relu_fwd_pd;
protected:
float alpha_;
float beta_;
};
template <typename Device, typename T>
class MklReluOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklReluOp() {}
explicit MklReluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
(static_cast<T*>(out_o))[0] =
std::max((static_cast<T*>(user_i))[0], static_cast<T>(0));
return;
}
};
template <typename Device, typename T>
class MklReluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklReluGradOp() {}
explicit MklReluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu>(
context, 0.0f, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] *
(static_cast<T>((static_cast<T*>(user_i))[0] > static_cast<T>(0)));
return;
}
};
template <typename Device, typename T>
class MklEluOp : public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_elu> {
public:
~MklEluOp() {}
explicit MklEluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_elu>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
T feature = (static_cast<T*>(user_i))[0];
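    // ELU: f(x) = x for x >= 0 and exp(x) - 1 otherwise (alpha == 1).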
if (feature < static_cast<T>(0))
      (static_cast<T*>(out_o))[0] =
          Eigen::numext::exp(feature) - static_cast<T>(1);
else
(static_cast<T*>(out_o))[0] = feature;
return;
}
};
template <typename Device, typename T>
class MklEluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_elu> {
public:
~MklEluGradOp() {}
explicit MklEluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_elu>(
context, 0.0f, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
T feature = (static_cast<T*>(user_i))[0];
if (feature > static_cast<T>(0)) {
(static_cast<T*>(out_o))[0] = (static_cast<T*>(user_g))[0];
} else {
T elu = Eigen::numext::exp(feature) - static_cast<T>(1);
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] * (elu + static_cast<T>(1));
}
}
};
template <typename Device, typename T>
class MklTanhOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_tanh> {
public:
~MklTanhOp() {}
explicit MklTanhOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_tanh>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
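    // Scalar fallback: tanh(x) = (e^x - e^-x) / (e^x + e^-x).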
T feature = (static_cast<T*>(user_i))[0];
T e1 = Eigen::numext::exp(feature);
T e2 = Eigen::numext::exp(-feature);
(static_cast<T*>(out_o))[0] = (e1 - e2) / (e1 + e2);
return;
}
};
template <typename Device, typename T>
class MklTanhGradOp
: public MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_tanh_use_dst_for_bwd> {
public:
~MklTanhGradOp() {}
explicit MklTanhGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_tanh_use_dst_for_bwd>(
context, 0.0f, 0.0f) {}
virtual int GetDiffDstIndex() const { return 1; }
virtual int GetSrcIndex() const { return 0; }
virtual int GetDiffSrcIndex() const { return 0; }
virtual int GetTypeOfInputTensorFromFwdOp() const { return DNNL_ARG_DST; }
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = GetDiffDstIndex();
const size_t src_index = GetSrcIndex();
const size_t diff_src_index = GetDiffSrcIndex();
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
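    // With eltwise_tanh_use_dst_for_bwd the first input is the forward output
    // y, so the gradient is g * (1 - y^2).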
T tanh = (static_cast<T*>(user_i))[0];
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] * (static_cast<T>(1) - tanh * tanh);
}
};
#define RELU6_UPPER_BOUND 6.0f
template <typename Device, typename T>
class MklRelu6Op
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu> {
public:
~MklRelu6Op() {}
explicit MklRelu6Op(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu>(
context, RELU6_UPPER_BOUND, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
T* out_o = dst_tensor->flat<T>().data();
out_o[0] = std::min(std::max(user_i[0], static_cast<T>(0)),
static_cast<T>(RELU6_UPPER_BOUND));
return;
}
};
template <typename Device, typename T>
class MklRelu6GradOp
: public MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_bounded_relu> {
public:
~MklRelu6GradOp() {}
explicit MklRelu6GradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu>(
context, RELU6_UPPER_BOUND, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
T* out_o = diff_src_tensor->flat<T>().data();
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
T* user_g = const_cast<T*>(diff_dst_tensor.flat<T>().data());
out_o[0] = user_g[0] *
static_cast<T>(user_i[0] > static_cast<T>(0) &&
(user_i[0] < static_cast<T>(RELU6_UPPER_BOUND)));
return;
}
};
template <typename Device, typename T>
class MklLeakyReluOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklLeakyReluOp() {}
explicit MklLeakyReluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu>(context, 0.0f,
0.0f) {
float alpha;
OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha));
OP_REQUIRES(
context, alpha <= 1,
errors::InvalidArgument("MKL LeakyRelu only supports alpha <= 1. "
"alpha is: ",
alpha));
this->alpha_ = alpha;
}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
T* out_o = dst_tensor->flat<T>().data();
out_o[0] = user_i[0] >= T(0) ? user_i[0] : user_i[0] * T(this->alpha_);
return;
}
};
template <typename Device, typename T>
class MklLeakyReluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklLeakyReluGradOp() {}
explicit MklLeakyReluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu>(
context, 0.0f, 0.0f) {
float alpha;
OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha));
OP_REQUIRES(
context, alpha <= 1,
errors::InvalidArgument("MKL LeakyRelu only supports alpha <= 1. "
"alpha is: ",
alpha));
this->alpha_ = alpha;
}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
T* out_o = diff_src_tensor->flat<T>().data();
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
T* user_g = const_cast<T*>(diff_dst_tensor.flat<T>().data());
out_o[0] = user_i[0] >= static_cast<T>(0)
? user_g[0]
: user_g[0] * static_cast<T>(this->alpha_);
return;
}
};
#define REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklReluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklReluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklReluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklElu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklEluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklEluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklEluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklTanh") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklTanhOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklTanhGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklTanhGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu6") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklRelu6Op<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu6Grad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklRelu6GradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklLeakyRelu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklLeakyReluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklLeakyReluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklLeakyReluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES);
}
#endif | #if defined(INTEL_MKL) && !defined(ENABLE_ONEDNN_V3) && defined(ENABLE_MKL)
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
static Graph* Activation(const string& op_name, const string& kind,
const TensorShape& shape) {
auto* graph = new Graph(OpRegistry::Global());
const string node_name = kind + "_" + op_name;
const bool isForwardOp = !tensorflow::str_util::EndsWith(op_name, "Grad");
const bool isDefault = (kind == "Default");
Tensor input_t(DT_FLOAT, shape);
input_t.flat<float>().setRandom();
Node* input = test::graph::Constant(graph, input_t, "input");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
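  // MKL layout-dependent kernels expect a dummy Mkl metadata tensor alongside
  // each data input.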
if (isForwardOp) {
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), op_name)
.Input(input)
.Attr("T", DT_FLOAT)
.Finalize(graph, nullptr));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), "_Mkl" + op_name)
.Input(input)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("_kernel", "MklLayoutDependentOp")
.Finalize(graph, nullptr));
return graph;
}
Tensor grad_t(DT_FLOAT, shape);
grad_t.flat<float>().setRandom();
Node* grad = test::graph::Constant(graph, grad_t, "grad");
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), op_name)
.Input(grad)
.Input(input)
.Attr("T", DT_FLOAT)
.Finalize(graph, nullptr));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), "_Mkl" + op_name)
.Input(grad)
.Input(input)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("_kernel", "MklLayoutDependentOp")
.Finalize(graph, nullptr));
return graph;
}
#define BM_Activation(op, kind, A, B, C, D, type) \
static void BM_##op##_##kind##_##type##_##A##_##B##_##C##_##D( \
::testing::benchmark::State& state) { \
int64 num_computed_elements = (A) * (B) * (C) * (D); \
int64 flops_per_iter = num_computed_elements; \
\
test::Benchmark(#type, Activation(#op, #kind, {A, B, C, D}), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_##op##_##kind##_##type##_##A##_##B##_##C##_##D)
#define BM(op, A, B, C, D, type) \
BM_Activation(op, Default, A, B, C, D, type); \
BM_Activation(op, Mkl, A, B, C, D, type);
#define TEST_ALL_SIZES(OP) \
BM(OP, 2, 4, 8, 16, cpu); \
BM(OP, 3, 5, 9, 17, cpu); \
BM(OP, 32, 64, 128, 256, cpu); \
BM(OP, 33, 65, 129, 257, cpu);
TEST_ALL_SIZES(Tanh)
TEST_ALL_SIZES(TanhGrad)
TEST_ALL_SIZES(Relu)
TEST_ALL_SIZES(ReluGrad)
TEST_ALL_SIZES(Elu)
TEST_ALL_SIZES(EluGrad)
TEST_ALL_SIZES(Relu6)
TEST_ALL_SIZES(Relu6Grad)
TEST_ALL_SIZES(LeakyRelu)
TEST_ALL_SIZES(LeakyReluGrad)
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_relu_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_relu_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f7eb9d2-3b42-4f7c-8958-4e6e67b9b0a1 | cpp | tensorflow/tensorflow | mkl_qmatmul_op | tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc | tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc | #if defined(INTEL_MKL)
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h"
#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h"
#include "tensorflow/core/kernels/no_op.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/work_sharder.h"
namespace {
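// Input quantization modes: MIN_FIRST maps [min, max] onto the unsigned 8-bit
// range (asymmetric); SCALED uses a symmetric scale about zero.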
enum {
QUANTIZE_MODE_MIN_FIRST,
QUANTIZE_MODE_SCALED,
};
}
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define TSCALED_BIAS Tbias
#else
#define TSCALED_BIAS float
#endif
template <typename Device, typename Tinput, typename Tweight, typename Tbias,
typename Toutput, bool native_format = false>
class MklDnnQuantizedMatMulOp
: public MklDnnMatMulOpBase<Tweight, Tbias, Toutput> {
public:
virtual ~MklDnnQuantizedMatMulOp() {
if (this->input_bias_ != nullptr) {
delete this->input_bias_;
input_bias_ = nullptr;
}
if (this->scaled_bias_ != nullptr) {
delete this->scaled_bias_;
scaled_bias_ = nullptr;
}
if (this->comp_bias_ != nullptr) {
delete this->comp_bias_;
comp_bias_ = nullptr;
}
}
float* GetCompBiasBuffer(int size) {
if (comp_bias_ == nullptr) {
comp_bias_ = new float[size];
}
return comp_bias_;
}
explicit MklDnnQuantizedMatMulOp(OpKernelConstruction* context)
: MklDnnMatMulOpBase<Tweight, Tbias, Toutput>(context) {
string mode_string;
OP_REQUIRES_OK(context, context->GetAttr("input_quant_mode", &mode_string));
if (mode_string == "MIN_FIRST") {
mode_ = QUANTIZE_MODE_MIN_FIRST;
} else if (mode_string == "SCALED") {
mode_ = QUANTIZE_MODE_SCALED;
} else {
context->CtxFailure(absl::InvalidArgumentError(
absl::StrCat("Quantization mode must be either MIN_FIRST or "
"SCALED, but received ",
mode_string)));
}
this->is_weight_const_ = false;
if (context->HasAttr("is_weight_const")) {
OP_REQUIRES_OK(context, context->GetAttr("is_weight_const",
&(this->is_weight_const_)));
}
this->is_bias_const_ = false;
if (context->HasAttr("is_bias_const")) {
OP_REQUIRES_OK(
context, context->GetAttr("is_bias_const", &(this->is_bias_const_)));
}
}
void Compute(OpKernelContext* context) override {
try {
const Tensor& src_tensor = MklGetInput(context, this->kInputIndexSrc);
const Tensor& weight_tensor =
MklGetInput(context, this->kInputIndexWeight);
const Tensor& bias_tensor = MklGetInput(context, this->kInputIndexBias);
MklDnnShape src_mkl_shape, weight_mkl_shape;
GetMklShape(context, this->kInputIndexSrc, &src_mkl_shape, native_format);
GetMklShape(context, this->kInputIndexWeight, &weight_mkl_shape,
native_format);
OP_REQUIRES(
context, !weight_mkl_shape.IsMklTensor(),
absl::InvalidArgumentError("Weight should not be in MKL Layout"));
MklDnnData<Tinput> src(&(this->cpu_engine_));
MklDnnData<Tweight> weight(&(this->cpu_engine_));
memory::dims src_dims, weight_dims;
memory::dims dst_dims_tf_order, dst_dims_mkl_order;
auto src_tf_shape = src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetTfShape()
: src_tensor.shape();
auto weight_tf_shape = weight_mkl_shape.IsMklTensor()
? weight_mkl_shape.GetTfShape()
: weight_tensor.shape();
src_dims = TFShapeToMklDnnDims(src_tf_shape);
weight_dims = TFShapeToMklDnnDims(weight_tf_shape);
dst_dims_mkl_order = {static_cast<int>(src_tf_shape.dim_size(0)),
static_cast<int>(weight_tf_shape.dim_size(1))};
weight_dims = {static_cast<int>(weight_tf_shape.dim_size(1)),
static_cast<int>(weight_tf_shape.dim_size(0))};
Tensor* dst_tensor = nullptr;
auto input_output_fmt = memory::format_tag::nc;
auto input_output_fmt_mkldnn = MklTensorFormat::FORMAT_NC;
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<Tinput>(), input_output_fmt);
src.SetUsrMem(src_md, &src_tensor);
auto weight_md = weight_mkl_shape.IsMklTensor()
? weight_mkl_shape.GetMklLayout()
: memory::desc(weight_dims, MklDnnType<Tweight>(),
memory::format_tag::io);
weight.SetUsrMem(weight_md, &weight_tensor);
MklDnnMatMulFwdPrimitive<float, Tinput, Tweight, Tbias, Toutput>*
matmul_fwd = nullptr;
memory::dims bias_dims = {static_cast<int>(bias_tensor.dim_size(0))};
MklDnnMatMulFwdParams matmul_fwd_dims(src_dims, weight_dims, bias_dims,
dst_dims_mkl_order);
this->ExtendMklDnnMatMulFwdParams(context, matmul_fwd_dims);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
matmul_fwd =
MklDnnMatMulFwdPrimitiveFactory<float, Tinput, Tweight, Tbias,
Toutput>::Get(matmul_fwd_dims, 0);
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>
matmul_fwd_pd = matmul_fwd->GetPrimitiveDesc();
this->AllocateOutputTensor(context, *matmul_fwd_pd, dst_dims_mkl_order,
input_output_fmt_mkldnn, &dst_tensor,
native_format);
Toutput* dst_data =
reinterpret_cast<Toutput*>(dst_tensor->flat<Toutput>().data());
Tinput* src_data = nullptr;
if (!native_format && src_md != matmul_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(matmul_fwd_pd.get()->src_desc(),
this->cpu_engine_, context);
src_data = static_cast<Tinput*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<Tinput*>(
const_cast<Tinput*>(src_tensor.flat<Tinput>().data()));
}
Tweight* weight_data = nullptr;
if (weight_md != matmul_fwd_pd->weights_desc()) {
bool is_weight_cached = false;
if (this->is_weight_const_) {
if (this->IsWeightCacheEmpty(context)) {
this->CacheWeight(context, matmul_fwd_pd, weight_data,
weight_tensor, weight, weight_md);
}
weight_data =
this->GetCachedWeight(context, matmul_fwd_pd->weights_desc());
is_weight_cached = (weight_data != nullptr);
}
if (!is_weight_cached) {
weight.SetUsrMem(weight_md, &weight_tensor);
weight.CheckReorderToOpMem(matmul_fwd_pd.get()->weights_desc(),
this->cpu_engine_, context);
weight_data =
static_cast<Tweight*>(weight.GetOpMem().get_data_handle());
}
} else {
weight_data = static_cast<Tweight*>(
const_cast<Tweight*>(weight_tensor.flat<Tweight>().data()));
}
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, matmul_fwd->GetEngine()));
UserScratchPad<unsigned char> scratch_pad;
scratch_pad.AllocateSPTensor(matmul_fwd, context);
#ifndef ENABLE_ONEDNN_V3
Tbias* bias_data = this->GetBiasHandle(
context, matmul_fwd_pd, bias_tensor, weight_tensor, cpu_stream);
#else
void* bias_data = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
Tensor temp_scaled_bias_tensor;
this->GetBiasHandle(context, matmul_fwd_pd, bias_tensor, weight_tensor,
cpu_stream, &temp_scaled_bias_tensor, &bias_data);
#endif
matmul_fwd->Execute(src_data, weight_data, bias_data, dst_data,
matmul_fwd_dims, scratch_pad.Get(), cpu_stream);
} catch (dnnl::error& e) {
string error_msg = tensorflow::strings::StrCat(
"Status: ", e.status, ", message: ", string(e.message), ", in file ",
__FILE__, ":", __LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
float min_output_value;
float max_output_value;
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
const Tensor& min_freezed_tensor = context->input(7);
const Tensor& max_freezed_tensor = context->input(8);
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(min_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_output` must be rank 0 but is rank ",
min_freezed_tensor.dims())));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(max_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_output` must be rank 0 but is rank ",
max_freezed_tensor.dims())));
min_output_value = min_freezed_tensor.scalar<float>()();
max_output_value = max_freezed_tensor.scalar<float>()();
} else {
ComputeOutputRangeForInt32(context, &min_output_value, &max_output_value);
}
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, qint32>::value) {
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
MklDnnShape output_min_mkl_shape, output_max_mkl_shape;
output_min_mkl_shape.SetMklTensor(false);
output_max_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, 1, &output_min, {},
output_min_mkl_shape, native_format);
AllocateOutputSetMklShape(context, 2, &output_max, {},
output_max_mkl_shape, native_format);
output_min->flat<float>()(0) = min_output_value;
output_max->flat<float>()(0) = max_output_value;
}
}
protected:
void ComputeOutputRangeForInt32(OpKernelContext* context,
float* min_output_value,
float* max_output_value) {
const float min_input = context->input(3).scalar<float>()();
const float max_input = context->input(4).scalar<float>()();
const float min_weight = context->input(5).scalar<float>()();
const float max_weight = context->input(6).scalar<float>()();
MklQuantizationRangeForMultiplication<quint8, qint8, qint32>(
min_input, max_input, min_weight, max_weight, min_output_value,
max_output_value);
}
virtual void ExtendMklDnnMatMulFwdParams(OpKernelContext* context,
MklDnnMatMulFwdParams& params) {
params.dtypes.append(typeid(Tinput).name());
params.dtypes.append(typeid(Tweight).name());
params.dtypes.append(typeid(Tbias).name());
params.dtypes.append(typeid(Toutput).name());
const Tensor& min_input_tensor = context->input(3);
const Tensor& max_input_tensor = context->input(4);
const Tensor& min_weight_tensor = context->input(5);
const Tensor& max_weight_tensor = context->input(6);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(min_input_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_a` must be rank 0 but is rank ", min_input_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_input_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_a` must be rank 0 but is rank ", max_input_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(min_weight_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_b` must be rank 0 but is rank ", min_weight_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_weight_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_b` must be rank 0 but is rank ", max_weight_tensor.dims())));
#ifdef ENABLE_ONEDNN_V3
const float min_input = min_input_tensor.scalar<float>()();
const float max_input = max_input_tensor.scalar<float>()();
const float min_weight = min_weight_tensor.scalar<float>()();
const float max_weight = max_weight_tensor.scalar<float>()();
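    // oneDNN v3 takes explicit src/wei/dst scales instead of a single
    // output_scale: activations use 255 quantization levels (the full
    // [min, max] range for MIN_FIRST, the symmetric max for SCALED) and the
    // signed 8-bit weights use 127.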
float src_scale =
mode_ == QUANTIZE_MODE_MIN_FIRST
? (max_input - min_input) / 255.0
: std::max(std::abs(min_input), std::abs(max_input)) / 255.0;
float wei_scale =
std::max(std::abs(min_weight), std::abs(max_weight)) / 127.0;
float dst_scale = 1.0;
#endif
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, float>::value) {
const Tensor& min_freezed_tensor = context->input(7);
const Tensor& max_freezed_tensor = context->input(8);
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(min_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_output` must be rank 0 but is rank ",
min_freezed_tensor.dims())));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(max_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_output` must be rank 0 but is rank ",
max_freezed_tensor.dims())));
const float min_freezed_output = min_freezed_tensor.scalar<float>()();
const float max_freezed_output = max_freezed_tensor.scalar<float>()();
float scale_eightbit =
std::max(std::abs(min_freezed_output), std::abs(max_freezed_output));
#ifndef ENABLE_ONEDNN_V3
float min_output_value;
float max_output_value;
ComputeOutputRangeForInt32(context, &min_output_value, &max_output_value);
float scale_int32 =
std::max(std::abs(min_output_value), std::abs(max_output_value));
float scale = 1.0;
if (std::is_same<Toutput, quint8>::value) {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 23);
} else if (std::is_same<Toutput, qint8>::value) {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 24);
} else if (std::is_same<Toutput, float>::value) {
scale = scale_int32 / static_cast<float>(1u << 31);
} else {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 24);
}
std::vector<float> output_scale;
output_scale.push_back(scale);
params.post_op_params.push_back({"output_scale", output_scale});
}
#else
if (std::is_same<Toutput, quint8>::value) {
dst_scale = scale_eightbit / 255.0;
} else if (std::is_same<Toutput, qint8>::value) {
dst_scale = scale_eightbit / 127.0;
} else {
dst_scale = 1.0;
}
} else {
if (!std::is_same<Toutput, qint32>::value)
TF_CHECK_OK(Status(absl::StatusCode::kFailedPrecondition,
"Output datatype is expected to be qint32."));
dst_scale = src_scale * wei_scale;
}
params.post_op_params.push_back({"src_scale", {src_scale}});
params.post_op_params.push_back({"wei_scale", {wei_scale}});
params.post_op_params.push_back({"dst_scale", {dst_scale}});
#endif
}
#ifndef ENABLE_ONEDNN_V3
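  // Returns a bias handle consumable by oneDNN (pre-v3). A qint32 bias is
  // passed through unchanged; a float bias is rescaled and, in MIN_FIRST
  // mode, compensated for the shifted input zero point.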
Tbias* GetBiasHandle(
OpKernelContext* context,
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>&
mkldnn_matmul_fwd_pd,
const Tensor& bias_tensor, const Tensor& weight_tensor,
std::shared_ptr<stream> reorder_stream) {
if (std::is_same<Tbias, qint32>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
} else {
const float min_input = context->input(3).flat<float>()(0);
const float max_input = context->input(4).flat<float>()(0);
const float min_weight = context->input(5).flat<float>()(0);
const float max_weight = context->input(6).flat<float>()(0);
std::vector<dnnl::primitive> net;
float out_scale;
if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
int64_t k = weight_tensor.dim_size(0);
int64_t n = weight_tensor.dim_size(1);
float* comp_bias = GetCompBiasBuffer(n);
qint8* wt_buf = static_cast<qint8*>(
const_cast<qint8*>(weight_tensor.flat<qint8>().data()));
const float* bias_buf = static_cast<float*>(
const_cast<float*>(bias_tensor.flat<float>().data()));
float qa_amin = 255 * min_input / (max_input - min_input);
out_scale = (255.0 * 127.0) /
((max_input - min_input) *
std::max(std::abs(max_weight), std::abs(min_weight)));
#ifndef ENABLE_ONEDNN_OPENMP
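          // Without OpenMP, shard the per-output-channel bias compensation
          // across the Eigen thread pool.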
auto parallel_func = [&](int64_t start, int64_t end) {
for (int64_t j = start; j < end; j++) {
int64_t x = 0;
for (int64_t i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
comp_bias[j] =
((bias_buf[j] * out_scale) + static_cast<float>(x * qa_amin));
}
};
const float kArithCost = 2.5f;
const float kMovCost = 1.0f;
float shard_cost = 4 * kArithCost + kMovCost;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, n, shard_cost,
parallel_func);
#else
#pragma omp parallel for schedule(static)
for (int64_t j = 0; j < n; ++j) {
int64_t x = 0;
for (int64_t i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
comp_bias[j] =
((bias_buf[j] * out_scale) + static_cast<float>(x * qa_amin));
}
#endif
return reinterpret_cast<Tbias*>(comp_bias_);
} else if (mode_ == QUANTIZE_MODE_SCALED) {
out_scale = 255.0 * 127.0 / max_input *
std::max(std::abs(max_weight), std::abs(min_weight));
std::vector<float> scales;
scales.push_back(out_scale);
dnnl::primitive_attr bias_attr;
bias_attr.set_output_scales(0, scales);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
input_bias_ = new memory(mkldnn_matmul_fwd_pd->bias_desc(),
this->cpu_engine_, bias_buf);
scaled_bias_ =
new memory(mkldnn_matmul_fwd_pd->bias_desc(), this->cpu_engine_);
auto reorder_desc = dnnl::reorder::primitive_desc(
*input_bias_, *scaled_bias_, bias_attr);
net.push_back(dnnl::reorder(reorder_desc));
std::unordered_map<int, memory> reorder_net_args = {
{DNNL_ARG_FROM, *input_bias_}, {DNNL_ARG_TO, *scaled_bias_}};
net.at(0).execute(*reorder_stream, reorder_net_args);
return reinterpret_cast<Tbias*>(scaled_bias_->get_data_handle());
} else {
context->CtxFailure(absl::InvalidArgumentError(
"Quantization mode must be either MIN_FIRST or SCALED."));
return nullptr;
}
}
}
#else
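  // oneDNN v3 variant: a float bias in SCALED mode is used as-is; otherwise
  // the bias is rescaled/compensated into a temporary tensor (or served from
  // the bias cache) and returned through bias_data.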
void GetBiasHandle(
OpKernelContext* context,
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>&
mkldnn_matmul_fwd_pd,
const Tensor& bias_tensor, const Tensor& weight_tensor,
std::shared_ptr<stream> reorder_stream, Tensor* temp_scaled_bias_tensor,
void** bias_data) {
if (std::is_same<Tbias, float>::value && mode_ == QUANTIZE_MODE_SCALED) {
return;
} else {
const float min_input = context->input(3).flat<float>()(0);
const float max_input = context->input(4).flat<float>()(0);
const float min_weight = context->input(5).flat<float>()(0);
const float max_weight = context->input(6).flat<float>()(0);
std::vector<dnnl::primitive> net;
float out_scale;
bool is_cached_bias_valid = false;
bool is_bias_cache_empty = this->IsBiasCacheEmpty();
if (!is_bias_cache_empty) {
this->GetCachedBias(min_input, max_input, bias_data);
is_cached_bias_valid = (*bias_data != nullptr);
}
if (!is_cached_bias_valid) {
auto scaled_bias_md = mkldnn_matmul_fwd_pd->bias_desc();
TensorShape scaled_bias_shape;
scaled_bias_shape.AddDim(
(scaled_bias_md.get_size() / sizeof(TSCALED_BIAS)));
OP_REQUIRES_OK(
context,
context->allocate_temp(DataTypeToEnum<TSCALED_BIAS>::v(),
scaled_bias_shape, temp_scaled_bias_tensor));
void* scaled_bias_buf = static_cast<void*>(
temp_scaled_bias_tensor->flat<TSCALED_BIAS>().data());
if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
int k = weight_tensor.dim_size(0);
int n = weight_tensor.dim_size(1);
TSCALED_BIAS* comp_bias = (TSCALED_BIAS*)scaled_bias_buf;
qint8* wt_buf = static_cast<qint8*>(
const_cast<qint8*>(weight_tensor.flat<qint8>().data()));
const Tbias* bias_buf = static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
float qa_amin = 255 * min_input / (max_input - min_input);
out_scale = (255.0 * 127.0) /
((max_input - min_input) *
std::max(std::abs(max_weight), std::abs(min_weight)));
for (int j = 0; j < n; ++j) {
int x = 0;
for (int i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
if (std::is_same<Tbias, qint32>::value) {
comp_bias[j] = static_cast<float>(bias_buf[j]) / out_scale;
} else {
comp_bias[j] = static_cast<float>(bias_buf[j]) + (x * qa_amin);
}
}
} else if (mode_ == QUANTIZE_MODE_SCALED) {
out_scale = 255.0 * 127.0 /
(max_input *
std::max(std::abs(max_weight), std::abs(min_weight)));
std::vector<float> scales;
scales.push_back(out_scale);
dnnl::primitive_attr bias_attr;
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
bias_attr.set_scales_mask(DNNL_ARG_DST, 0);
auto input_bias_mem =
memory({{static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(),
memory::format_tag::x},
this->cpu_engine_, bias_buf);
auto scaled_bias_mem = memory(mkldnn_matmul_fwd_pd->bias_desc(),
this->cpu_engine_, scaled_bias_buf);
auto reorder_desc = dnnl::reorder::primitive_desc(
input_bias_mem, scaled_bias_mem, bias_attr);
net.push_back(dnnl::reorder(reorder_desc));
std::unordered_map<int, memory> reorder_net_args = {
{DNNL_ARG_FROM, input_bias_mem}, {DNNL_ARG_TO, scaled_bias_mem}};
auto scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x},
this->cpu_engine_, scales.data());
reorder_net_args.insert(
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, scale_mem});
std::vector<MemoryArgsMap> net_args{reorder_net_args};
ExecutePrimitive(net, &net_args, this->cpu_engine_, context);
} else {
context->CtxFailure(
              absl::InvalidArgumentError("Quantization mode must be "
                                         "either MIN_FIRST or SCALED."));
}
*bias_data = static_cast<void*>(
temp_scaled_bias_tensor->flat<TSCALED_BIAS>().data());
if (is_bias_cache_empty) {
this->CacheBias(context, *temp_scaled_bias_tensor, min_input,
max_input);
}
}
}
}
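  // The cached bias remains valid only while the constant weight/bias and the
  // input quantization range are unchanged.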
bool IsCachedBiasValid(float current_min_input,
float current_max_input) override {
if (this->is_bias_const_ && this->is_weight_const_ &&
std::abs(current_min_input - this->saved_min_input_) < 1e-5 &&
std::abs(current_max_input - this->saved_max_input_) < 1e-5) {
return true;
}
return false;
}
#endif
private:
memory* input_bias_ = nullptr;
memory* scaled_bias_ = nullptr;
float* comp_bias_ = nullptr;
int mode_;
};
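// Same as MklDnnQuantizedMatMulOp, but fuses a Relu post-op into the oneDNN
// inner-product primitive.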
template <typename Device, typename Tinput, typename Tweight, typename Tbias,
typename Toutput, bool native_format = false>
class MklDnnQuantizedMatMulReluOp
: public MklDnnQuantizedMatMulOp<Device, Tinput, Tweight, Tbias, Toutput,
native_format> {
public:
virtual ~MklDnnQuantizedMatMulReluOp() {}
explicit MklDnnQuantizedMatMulReluOp(OpKernelConstruction* context)
: MklDnnQuantizedMatMulOp<Device, Tinput, Tweight, Tbias, Toutput,
native_format>(context) {}
protected:
void ExtendMklDnnMatMulFwdParams(OpKernelContext* context,
MklDnnMatMulFwdParams& params) override {
MklDnnQuantizedMatMulOp<Device, quint8, qint8, Tbias, Toutput,
native_format>::ExtendMklDnnMatMulFwdParams(context,
params);
params.post_op_params.push_back({"Relu", {1.0, 0.0, 0.0}});
}
};
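// Kernel registrations. The un-prefixed op names are registered as NoOp,
// while the _Mkl* names carry the kMklQuantizedOpLabel label and map to the
// actual quantized MatMul kernels.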
#define REGISTER_MKL_KERNEL(op, kernel, bias_type, output_type, is_native) \
REGISTER_KERNEL_BUILDER( \
Name(op) \
.Device(DEVICE_CPU) \
.TypeConstraint<quint8>("T1") \
.TypeConstraint<qint8>("T2") BIAS_TYPE_CONSTRAINT(bias_type) \
.TypeConstraint<output_type>("Toutput") LABEL, \
kernel TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native));
#define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(op, kernel, output_type, is_native) \
REGISTER_MKL_KERNEL(op, kernel, float, output_type, is_native) \
REGISTER_MKL_KERNEL(op, kernel, qint32, output_type, is_native);
#define LABEL
#define TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native)
#define BIAS_TYPE_CONSTRAINT(bias_type)
REGISTER_MKL_KERNEL("QuantizedMatMulWithBiasAndRelu", NoOp, float, qint32,
false);
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBias", NoOp, qint32,
false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedMatMulWithBiasAndReluAndRequantize", NoOp, quint8, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBiasAndRequantize", NoOp,
quint8, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBiasAndDequantize", NoOp,
float, false);
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
#define TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native) \
<CPUDevice, quint8, qint8, bias_type, output_type, is_native>
#define BIAS_TYPE_CONSTRAINT(bias_type)
REGISTER_MKL_KERNEL("_MklQuantizedMatMulWithBiasAndRelu",
MklDnnQuantizedMatMulReluOp, float, qint32, true);
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBias",
MklDnnQuantizedMatMulOp, qint32, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedMatMulWithBiasAndReluAndRequantize",
MklDnnQuantizedMatMulReluOp, quint8, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBiasAndRequantize",
MklDnnQuantizedMatMulOp, quint8, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBiasAndDequantize",
MklDnnQuantizedMatMulOp, float, true);
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#undef TSCALED_BIAS
}
#endif |
#if defined(INTEL_MKL)
#define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
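// Parameterized over the op API: the bool parameter selects the old
// _MklQuantizedMatMulWithBias* ops (true) or the new _QuantizedMatMul op
// (false).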
class QuantizedMatMulTest : public OpsTestBase,
public ::testing::WithParamInterface<bool> {};
TEST_P(QuantizedMatMulTest, Small_withBias) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
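  // Expected = (2x3 input) x (3x4 weight) + bias, e.g. element (0,0) =
  // 1*7 + 2*11 + 3*15 + 1 = 75.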
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected, {75, 82, 89, 96, 174, 190, 206, 222});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withNegBias) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {100, -200, 300, -400});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
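  // Same product as above, with bias {100, -200, 300, -400} added per column.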
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected,
{174, -120, 386, -308, 273, -12, 503, -182});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_WithNegInp) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("input_quant_mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Attr("input_quant_mode", "MIN_FIRST")
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({4, 3}),
{11, 7, 3, 10, 6, 2, 9, 5, 1, 8, 4, 0});
AddInputFromArray<qint8>(TensorShape({3, 2}), {1, 4, 2, 5, 3, 6});
AddInputFromArray<float>(TensorShape({2}), {10.0f, 20.0f});
AddInputFromArray<float>(TensorShape({}), {-12.0f});
AddInputFromArray<float>(TensorShape({}), {243.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
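  // MIN_FIRST quantization with range [-12, 243] gives a scale of 1, so each
  // dequantized input is q - 12; e.g. row 0 is [-1, -5, -9], giving
  // {-1*1 - 5*2 - 9*3 + 10, -1*4 - 5*5 - 9*6 + 20} = {-28, -63}.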
Tensor expected(allocator(), DT_QINT32, TensorShape({4, 2}));
test::FillValues<qint32>(&expected,
{-28, -63, -34, -78, -40, -93, -46, -108});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndReq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndRequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<quint8>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs",
{DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QUINT8)
.Attr("fused_ops", {"BiasAdd", "Requantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4}));
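  // The old-API results differ by at most 1 between oneDNN v2 and v3 because
  // the two code paths compute the requantization scale differently.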
if (is_old_api) {
#ifdef ENABLE_ONEDNN_V3
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
#else
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 184, 169, 234, 179});
#endif
} else {
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<quint8>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndDeq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndDequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<float>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_FLOAT)
.Attr("fused_ops", {"BiasAdd", "Dequantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
if (is_old_api) {
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
}
TF_ASSERT_OK(RunOpKernel());
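  // Dequantized output equals the exact float result of input*weight + bias.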
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<float>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndRelu) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndRelu")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd", "Relu"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<float>(TensorShape({4}),
{100.0f, -200.0f, 300.0f, -400.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
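  // Relu zeroes out the negative entries of input*weight + bias.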
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected, {174, 0, 386, 0, 273, 0, 503, 0});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndReluAndReq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndReluAndRequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<quint8>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs",
{DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QUINT8)
.Attr("fused_ops", {"BiasAdd", "Relu", "Requantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4}));
if (is_old_api) {
#ifdef ENABLE_ONEDNN_V3
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
#else
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 184, 169, 234, 179});
#endif
} else {
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<quint8>(expected, output);
}
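// The second run should be markedly faster because the reordered weight is
// cached after the first run.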
TEST_P(QuantizedMatMulTest, Small_withWeightCached) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
int64 start_time = Env::Default()->NowMicros();
TF_ASSERT_OK(RunOpKernel());
int64 end_time = Env::Default()->NowMicros();
int64 total_duration_unopt = end_time - start_time;
Tensor expected(allocator(), DT_QINT32, TensorShape({1, 4}));
test::FillValues<qint32>(&expected, {75, 82, 89, 96});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
start_time = Env::Default()->NowMicros();
TF_ASSERT_OK(RunOpKernel());
end_time = Env::Default()->NowMicros();
int64 total_duration_opt = end_time - start_time;
LOG(INFO) << " Time taken by first call : " << total_duration_unopt
<< ", Time taken after Caching : " << total_duration_opt;
EXPECT_LT(total_duration_opt, total_duration_unopt * 0.8);
const Tensor& output_new = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output_new);
}
INSTANTIATE_TEST_SUITE_P(All, QuantizedMatMulTest,
::testing::Values(true, false));
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2dc24273-ac8a-4cd0-ae3e-f318b528f463 | cpp | tensorflow/tensorflow | mkl_fused_batch_norm_op | tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op.cc | tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op_test.cc | #ifdef INTEL_MKL
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/fused_batch_norm_op.h"
#include "tensorflow/core/kernels/no_op.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/tensor_format.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
#define GET_FLAG(bn_flag) static_cast<int>(dnnl::normalization_flags::bn_flag)
#define IS_SET(cflag) (context_.flags & GET_FLAG(cflag))
using dnnl::batch_normalization_backward;
using dnnl::batch_normalization_forward;
using dnnl::prop_kind;
using dnnl::stream;
using BatchNormFwdPd = dnnl::batch_normalization_forward::primitive_desc;
using BatchNormBwdPd = dnnl::batch_normalization_backward::primitive_desc;
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define FORWARD_INFERENCE prop_kind::forward_scoring
#define GET_DIFF_SCALE_DATA_BUFFER diff_scale_shift_data
#define GET_DIFF_SCALE_SHIFT_DATA_BUFFERS diff_scale_shift_data
#define GET_DIFF_SHIFT_DATA_BUFFER diff_scale_shift_data + depth_
#define GET_SCALE_AND_SHIFT_FLAGS GET_FLAG(use_scale_shift)
#define GET_SCALE_DATA_BUFFER scale_shift_data
#define IS_SCALE_AND_SHIFT_FLAG_SET IS_SET(use_scale_shift)
#define SCALE_SHIFT_NET_ARGS \
{ DNNL_ARG_SCALE_SHIFT, *context_.scale_shift_mem }
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#else
#define FORWARD_INFERENCE prop_kind::forward_inference
#define GET_DIFF_SCALE_DATA_BUFFER diff_scale_data
#define GET_DIFF_SCALE_SHIFT_DATA_BUFFERS diff_scale_data, diff_shift_data
#define GET_DIFF_SHIFT_DATA_BUFFER diff_shift_data
#define GET_SCALE_AND_SHIFT_FLAGS GET_FLAG(use_scale) | GET_FLAG(use_shift)
#define GET_SCALE_DATA_BUFFER scale_data
#define IS_SCALE_AND_SHIFT_FLAG_SET IS_SET(use_scale) && IS_SET(use_shift)
#define SCALE_SHIFT_NET_ARGS \
{DNNL_ARG_SCALE, *context_.scale_mem}, { DNNL_ARG_SHIFT, *context_.shift_mem }
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#endif
using CPUDevice = Eigen::ThreadPoolDevice;
using FusedBNActivationMode = functor::FusedBatchNormActivationMode;
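// Parameters used both to construct a batch-norm forward primitive and as
// the key under which it is cached.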
struct MklBatchNormFwdParams {
memory::dims src_dims;
int depth;
float eps;
bool training;
TensorFormat data_format;
FusedBNActivationMode activation_mode;
memory::desc src_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md;
#endif
MklBatchNormFwdParams(const memory::dims& src_dims, int depth, float eps,
bool training, TensorFormat data_format,
memory::desc src_md,
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md,
#endif
FusedBNActivationMode activation_mode)
: src_dims(src_dims),
depth(depth),
eps(eps),
training(training),
data_format(data_format),
activation_mode(activation_mode),
#ifndef ENABLE_ONEDNN_V3
src_md(src_md) {
}
#else
src_md(src_md),
dst_md(dst_md) {
}
#endif
};
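// Wraps a oneDNN batch-normalization forward primitive together with its
// memory objects so it can be cached and re-executed on new data handles.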
template <typename T, typename U>
class MklFusedBatchNormFwdPrimitive : public MklPrimitive {
public:
explicit MklFusedBatchNormFwdPrimitive(const MklBatchNormFwdParams& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.bn_fwd == nullptr) Setup(fwdParams);
}
~MklFusedBatchNormFwdPrimitive() {}
#ifndef ENABLE_ONEDNN_V3
void Execute(const T* src_data, const U* scale_shift_data, T* dst_data,
#else
void Execute(const T* src_data, const U* scale_data, const U* shift_data,
T* dst_data,
#endif
U* mean_data, U* variance_data,
std::shared_ptr<stream> fwd_stream, U* workspace_data) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *fwd_stream);
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data),
*fwd_stream);
if (IS_SET(use_scale_shift))
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)), *fwd_stream);
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(static_cast<void*>(mean_data),
*fwd_stream);
context_.variance_mem->set_data_handle(static_cast<void*>(variance_data),
*fwd_stream);
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(workspace_data, *fwd_stream);
}
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data));
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)));
#else
context_.scale_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_data)));
context_.shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(shift_data)));
#endif
}
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(static_cast<void*>(mean_data));
context_.variance_mem->set_data_handle(static_cast<void*>(variance_data));
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(workspace_data);
}
#endif
execute_primitives(context_.fwd_primitives, fwd_stream, context_.net_args);
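    // After execution, point every memory object back at DummyData so the
    // cached primitive keeps no reference to user buffers.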
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(DummyData);
#else
context_.scale_mem->set_data_handle(DummyData);
context_.shift_mem->set_data_handle(DummyData);
#endif
}
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(DummyData);
context_.variance_mem->set_data_handle(DummyData);
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(DummyData);
}
}
memory::desc GetDstPd() const { return context_.dst_mem->get_desc(); }
std::shared_ptr<BatchNormFwdPd> GetBatchNormFwdPd() const {
return context_.fwd_pd;
}
private:
struct BatchNormFwdContext {
int64 flags;
dnnl::prop_kind pkind;
std::shared_ptr<dnnl::memory> src_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_shift_mem;
#else
std::shared_ptr<dnnl::memory> scale_mem;
std::shared_ptr<dnnl::memory> shift_mem;
#endif
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<dnnl::memory> mean_mem;
std::shared_ptr<dnnl::memory> variance_mem;
std::shared_ptr<dnnl::memory> ws_mem;
std::shared_ptr<BatchNormFwdPd> fwd_pd;
std::shared_ptr<dnnl::primitive> bn_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> net_args;
BatchNormFwdContext()
: flags(0),
pkind(prop_kind::forward_training),
src_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
scale_shift_mem(nullptr),
#else
scale_mem(nullptr),
shift_mem(nullptr),
#endif
dst_mem(nullptr),
mean_mem(nullptr),
variance_mem(nullptr),
ws_mem(nullptr),
bn_fwd(nullptr) {
}
};
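  // Creates the forward primitive descriptor, memory objects and execution
  // arguments for the requested training/inference mode and fusion flags.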
void Setup(const MklBatchNormFwdParams& fwdParams) {
context_.flags = GET_SCALE_AND_SHIFT_FLAGS |
(fwdParams.training ? false : GET_FLAG(use_global_stats));
context_.pkind =
fwdParams.training ? prop_kind::forward_training : FORWARD_INFERENCE;
if (fwdParams.activation_mode == FusedBNActivationMode::kRelu) {
context_.flags |= GET_FLAG(fuse_norm_relu);
}
auto src_md = fwdParams.src_md;
#ifndef ENABLE_ONEDNN_V3
auto fwd_desc = batch_normalization_forward::desc(
context_.pkind, src_md, fwdParams.eps,
static_cast<dnnl::normalization_flags>(context_.flags));
context_.fwd_pd.reset(new BatchNormFwdPd(fwd_desc, cpu_engine_));
#else
auto dst_md = fwdParams.dst_md;
context_.fwd_pd.reset(new BatchNormFwdPd(
cpu_engine_, context_.pkind, src_md, dst_md, fwdParams.eps,
static_cast<dnnl::normalization_flags>(context_.flags)));
#endif
context_.src_mem.reset(
new memory(context_.fwd_pd->src_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(context_.fwd_pd->dst_desc(), cpu_engine_, DummyData));
memory::dims m_dims = {1, fwdParams.depth};
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
memory::dims s_dims = {2, fwdParams.depth};
context_.scale_shift_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
#else
memory::dims s_dims = {fwdParams.depth};
context_.scale_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::x},
cpu_engine_, DummyData));
context_.shift_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::x},
cpu_engine_, DummyData));
#endif
}
if (fwdParams.training || (IS_SET(use_global_stats))) {
context_.mean_mem.reset(
new memory({{m_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
context_.variance_mem.reset(
new memory({{m_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
}
if (IS_SET(fuse_norm_relu)) {
context_.ws_mem.reset(new memory(context_.fwd_pd->workspace_desc(),
cpu_engine_, DummyData));
}
if (!fwdParams.training && !(IS_SET(use_global_stats))) {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
context_.net_args.push_back({{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem}});
} else {
context_.net_args.push_back({{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem}});
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
} else if (IS_SET(use_global_stats)) {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem}});
}
} else {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DST, *context_.dst_mem}});
}
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
} else {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem}});
}
} else {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem}});
}
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
}
context_.fwd_primitives.push_back(*context_.bn_fwd);
}
struct BatchNormFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
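// Factory that caches forward primitives keyed on MklBatchNormFwdParams.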
template <typename T, typename U>
class MklFusedBatchNormFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklFusedBatchNormFwdPrimitive<T, U>* Get(
const MklBatchNormFwdParams& fwdParams) {
auto bn_fwd = static_cast<MklFusedBatchNormFwdPrimitive<T, U>*>(
MklFusedBatchNormFwdPrimitiveFactory<T, U>::GetInstance()
.GetBatchNormFwd(fwdParams));
if (bn_fwd == nullptr) {
bn_fwd = new MklFusedBatchNormFwdPrimitive<T, U>(fwdParams);
MklFusedBatchNormFwdPrimitiveFactory<T, U>::GetInstance().SetBatchNormFwd(
fwdParams, bn_fwd);
}
return bn_fwd;
}
static MklFusedBatchNormFwdPrimitiveFactory& GetInstance() {
static MklFusedBatchNormFwdPrimitiveFactory instance_;
return instance_;
}
private:
MklFusedBatchNormFwdPrimitiveFactory() {}
~MklFusedBatchNormFwdPrimitiveFactory() {}
static string CreateKey(const MklBatchNormFwdParams& fwdParams) {
string prefix = "bn_fwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(fwdParams.src_dims);
key_creator.AddAsKey<int>(fwdParams.depth);
key_creator.AddAsKey<float>(fwdParams.eps);
key_creator.AddAsKey<bool>(fwdParams.training);
key_creator.AddAsKey<TensorFormat>(fwdParams.data_format);
key_creator.AddAsKey<FusedBNActivationMode>(fwdParams.activation_mode);
key_creator.AddAsKey(typeid(T).name());
key_creator.AddAsKey(typeid(U).name());
return key_creator.GetKey();
}
MklPrimitive* GetBatchNormFwd(const MklBatchNormFwdParams& fwdParams) {
string key = CreateKey(fwdParams);
return this->GetOp(key);
}
void SetBatchNormFwd(const MklBatchNormFwdParams& fwdParams,
MklPrimitive* op) {
string key = CreateKey(fwdParams);
this->SetOp(key, op);
}
};
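// Parameters used to construct and cache a batch-norm backward primitive.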
struct MklBatchNormBwdParams {
memory::dims src_dims;
memory::dims diff_dst_dims;
int depth;
float eps;
bool training;
TensorFormat data_format;
memory::desc src_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md;
memory::desc diff_src_md;
#endif
memory::desc diff_dst_md;
MklBatchNormBwdParams(memory::dims src_dims, memory::dims diff_dst_dims,
int depth, float eps, bool training,
TensorFormat data_format, memory::desc src_md,
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md, memory::desc diff_src_md,
#endif
memory::desc diff_dst_md)
: src_dims(src_dims),
diff_dst_dims(diff_dst_dims),
depth(depth),
eps(eps),
training(training),
data_format(data_format),
src_md(src_md),
#ifdef ENABLE_ONEDNN_V3
dst_md(dst_md),
diff_src_md(diff_src_md),
#endif
diff_dst_md(diff_dst_md) {
}
};
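// Backward counterpart: computes diff_src together with the scale and shift
// gradients.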
template <typename T, typename U>
class MklFusedBatchNormBwdPrimitive : public MklPrimitive {
public:
explicit MklFusedBatchNormBwdPrimitive(const MklBatchNormBwdParams& bwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.bn_bwd == nullptr) Setup(bwdParams);
}
~MklFusedBatchNormBwdPrimitive() {}
void Execute(const T* src_data, const U* mean_data, const U* variance_data,
#ifndef ENABLE_ONEDNN_V3
const T* diff_dst_data, const U* scale_shift_data,
T* diff_src_data, U* diff_scale_shift_data, U* res_space_data,
#else
const T* diff_dst_data, const U* scale_data, T* diff_src_data,
U* diff_scale_data, U* diff_shift_data, U* res_space_data,
#endif
std::shared_ptr<stream> bwd_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *bwd_stream);
context_.mean_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(mean_data)), *bwd_stream);
context_.variance_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(variance_data)), *bwd_stream);
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)), *bwd_stream);
if (IS_SET(use_scale_shift)) {
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)), *bwd_stream);
context_.diff_scale_shift_mem->set_data_handle(
static_cast<void*>(diff_scale_shift_data), *bwd_stream);
}
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data),
*bwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.mean_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(mean_data)));
context_.variance_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(variance_data)));
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)));
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)));
context_.diff_scale_shift_mem->set_data_handle(
static_cast<void*>(diff_scale_shift_data));
#else
context_.scale_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_data)));
context_.diff_scale_mem->set_data_handle(
static_cast<void*>(diff_scale_data));
context_.diff_shift_mem->set_data_handle(
static_cast<void*>(diff_shift_data));
#endif
}
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data));
#endif
DCHECK_EQ(context_.bwd_primitives.size(), context_.net_args.size());
execute_primitives(context_.bwd_primitives, bwd_stream, context_.net_args);
context_.src_mem->set_data_handle(DummyData);
context_.mean_mem->set_data_handle(DummyData);
context_.variance_mem->set_data_handle(DummyData);
context_.diff_dst_mem->set_data_handle(DummyData);
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(DummyData);
context_.diff_scale_shift_mem->set_data_handle(DummyData);
#else
context_.scale_mem->set_data_handle(DummyData);
context_.diff_scale_mem->set_data_handle(DummyData);
context_.diff_shift_mem->set_data_handle(DummyData);
#endif
}
context_.diff_src_mem->set_data_handle(DummyData);
}
std::shared_ptr<BatchNormBwdPd> GetBatchNormBwdPd() const {
return context_.bwd_pd;
}
memory::desc GetDiffSrcPd() { return context_.diff_src_mem->get_desc(); }
private:
struct BatchNormBwdContext {
int64 flags;
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> mean_mem;
std::shared_ptr<dnnl::memory> variance_mem;
std::shared_ptr<dnnl::memory> diff_dst_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_shift_mem;
std::shared_ptr<dnnl::memory> diff_scale_shift_mem;
#else
std::shared_ptr<dnnl::memory> scale_mem;
std::shared_ptr<dnnl::memory> diff_scale_mem;
std::shared_ptr<dnnl::memory> diff_shift_mem;
#endif
std::shared_ptr<dnnl::memory> diff_src_mem;
std::shared_ptr<BatchNormBwdPd> bwd_pd;
std::shared_ptr<dnnl::primitive> bn_bwd;
std::vector<dnnl::primitive> bwd_primitives;
std::vector<std::unordered_map<int, memory>> net_args;
BatchNormBwdContext()
: flags(0),
src_mem(nullptr),
mean_mem(nullptr),
variance_mem(nullptr),
diff_dst_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
scale_shift_mem(nullptr),
diff_scale_shift_mem(nullptr),
#else
scale_mem(nullptr),
diff_scale_mem(nullptr),
diff_shift_mem(nullptr),
#endif
diff_src_mem(nullptr) {
}
};
inline int64 GetBatchNormFlags(const MklBatchNormBwdParams& bwdParams) const {
return GET_SCALE_AND_SHIFT_FLAGS |
(bwdParams.training ? false : GET_FLAG(use_global_stats));
}
void Setup(const MklBatchNormBwdParams& bwdParams) {
context_.flags = GetBatchNormFlags(bwdParams);
auto src_md = bwdParams.src_md;
auto diff_dst_md = bwdParams.diff_dst_md;
auto variance_desc = memory::desc({1, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
auto mean_desc = memory::desc({1, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
#ifndef ENABLE_ONEDNN_V3
auto scale_shift_desc = memory::desc({2, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
#else
auto scale_shift_desc =
memory::desc({bwdParams.depth}, MklDnnType<U>(), memory::format_tag::x);
#endif
auto diff_scale_shift_desc = scale_shift_desc;
auto bn_flags = GetBatchNormFlags(bwdParams);
#ifndef ENABLE_ONEDNN_V3
auto fwd_desc = batch_normalization_forward::desc(
prop_kind::forward_training, src_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
auto fwd_pd = BatchNormFwdPd(fwd_desc, cpu_engine_);
auto bwd_desc = batch_normalization_backward::desc(
prop_kind::backward, diff_dst_md, src_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
context_.bwd_pd.reset(new BatchNormBwdPd(bwd_desc, cpu_engine_, fwd_pd));
#else
auto dst_md = bwdParams.dst_md;
auto diff_src_md = bwdParams.diff_src_md;
auto fwd_pd = BatchNormFwdPd(
cpu_engine_, prop_kind::forward_training, src_md, dst_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
context_.bwd_pd.reset(new BatchNormBwdPd(
cpu_engine_, prop_kind::backward, diff_src_md, diff_dst_md, src_md,
bwdParams.eps, static_cast<dnnl::normalization_flags>(bn_flags),
fwd_pd));
#endif
context_.src_mem.reset(new memory(src_md, cpu_engine_, DummyData));
context_.diff_dst_mem.reset(
new memory(diff_dst_md, cpu_engine_, DummyData));
context_.variance_mem.reset(
new memory(variance_desc, cpu_engine_, DummyData));
context_.mean_mem.reset(new memory(mean_desc, cpu_engine_, DummyData));
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem.reset(
new memory(scale_shift_desc, cpu_engine_, DummyData));
context_.diff_scale_shift_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
#else
context_.scale_mem.reset(
new memory(scale_shift_desc, cpu_engine_, DummyData));
context_.diff_scale_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
context_.diff_shift_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
#endif
context_.diff_src_mem.reset(new memory(src_md, cpu_engine_, DummyData));
context_.bn_bwd.reset(new batch_normalization_backward(*context_.bwd_pd));
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DIFF_DST, *context_.diff_dst_mem},
{DNNL_ARG_DIFF_SRC, *context_.diff_src_mem},
#ifndef ENABLE_ONEDNN_V3
{DNNL_ARG_SCALE_SHIFT, *context_.scale_shift_mem},
{ DNNL_ARG_DIFF_SCALE_SHIFT,
*context_.diff_scale_shift_mem }});
#else
{DNNL_ARG_SCALE, *context_.scale_mem},
{DNNL_ARG_DIFF_SCALE, *context_.diff_scale_mem},
{DNNL_ARG_DIFF_SHIFT, *context_.diff_shift_mem}});
#endif
context_.bwd_primitives.push_back(*context_.bn_bwd);
}
struct BatchNormBwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T, typename U>
class MklFusedBatchNormBwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklFusedBatchNormBwdPrimitive<T, U>* Get(
const MklBatchNormBwdParams& bwdParams) {
auto bn_bwd = static_cast<MklFusedBatchNormBwdPrimitive<T, U>*>(
MklFusedBatchNormBwdPrimitiveFactory<T, U>::GetInstance()
.GetBatchNormBwd(bwdParams));
if (bn_bwd == nullptr) {
bn_bwd = new MklFusedBatchNormBwdPrimitive<T, U>(bwdParams);
MklFusedBatchNormBwdPrimitiveFactory<T, U>::GetInstance().SetBatchNormBwd(
bwdParams, bn_bwd);
}
return bn_bwd;
}
static MklFusedBatchNormBwdPrimitiveFactory& GetInstance() {
static MklFusedBatchNormBwdPrimitiveFactory instance_;
return instance_;
}
private:
MklFusedBatchNormBwdPrimitiveFactory() {}
~MklFusedBatchNormBwdPrimitiveFactory() {}
static string CreateKey(const MklBatchNormBwdParams& bwdParams) {
string prefix = "bn_bwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(bwdParams.src_dims);
key_creator.AddAsKey(bwdParams.diff_dst_dims);
key_creator.AddAsKey<int>(bwdParams.depth);
key_creator.AddAsKey<float>(bwdParams.eps);
key_creator.AddAsKey<bool>(bwdParams.training);
key_creator.AddAsKey<TensorFormat>(bwdParams.data_format);
key_creator.AddAsKey(typeid(T).name());
key_creator.AddAsKey(typeid(U).name());
return key_creator.GetKey();
}
MklPrimitive* GetBatchNormBwd(const MklBatchNormBwdParams& bwdParams) {
string key = CreateKey(bwdParams);
return this->GetOp(key);
}
void SetBatchNormBwd(const MklBatchNormBwdParams& bwdParams,
MklPrimitive* op) {
string key = CreateKey(bwdParams);
this->SetOp(key, op);
}
};
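// The FusedBatchNorm op kernel. The template flags select whether a
// reserved-space output is produced, whether the fused-activation (Ex)
// variant is used, and whether tensors are in native (non-MKL) layout.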
template <typename Device, typename T, typename U, bool reserved_space,
bool is_batch_norm_ex = false, bool native_format = false>
class MklFusedBatchNormOp : public OpKernel {
public:
explicit MklFusedBatchNormOp(OpKernelConstruction* context)
: OpKernel(context) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
epsilon_ = epsilon;
float exponential_avg_factor;
OP_REQUIRES_OK(context, context->GetAttr("exponential_avg_factor",
&exponential_avg_factor));
exponential_avg_factor_ = static_cast<U>(exponential_avg_factor);
string tensor_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
depth_ = 0;
mean_values_ = nullptr;
variance_values_ = nullptr;
if (!is_batch_norm_ex) {
activation_mode_ = FusedBNActivationMode::kIdentity;
} else {
int num_side_inputs;
OP_REQUIRES_OK(context,
context->GetAttr("num_side_inputs", &num_side_inputs));
OP_REQUIRES(context, num_side_inputs == 0,
absl::InvalidArgumentError(
"_MKLFusedBatchNorm do not support side input now."));
OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_));
OP_REQUIRES(context, activation_mode_ == FusedBNActivationMode::kRelu,
absl::InvalidArgumentError(
"_MKLFusedBatchNorm only support Relu activation"));
}
}
void Compute(OpKernelContext* context) override {
try {
const size_t kSrcIndex = 0;
const size_t kScaleIndex = 1;
const size_t kShiftIndex = 2;
const size_t kMeanIndex = 3;
const size_t kVarianceIndex = 4;
const Tensor& src_tensor = MklGetInput(context, kSrcIndex);
const Tensor& scale_tensor = MklGetInput(context, kScaleIndex);
const Tensor& shift_tensor = MklGetInput(context, kShiftIndex);
const Tensor& est_mean_tensor = MklGetInput(context, kMeanIndex);
const Tensor& est_variance_tensor = MklGetInput(context, kVarianceIndex);
TensorShape tf_shape_src;
MklDnnShape dnn_shape_src;
GetMklShape(context, kSrcIndex, &dnn_shape_src, native_format);
if (dnn_shape_src.IsMklTensor()) {
tf_shape_src = dnn_shape_src.GetTfShape();
OP_REQUIRES(context, dnn_shape_src.GetDimension() == 4,
absl::InvalidArgumentError(
                        absl::StrCat("input must be 4-dimensional: ",
src_tensor.shape().DebugString())));
} else {
tf_shape_src = src_tensor.shape();
OP_REQUIRES(context, src_tensor.dims() == 4,
absl::InvalidArgumentError(
                        absl::StrCat("input must be 4-dimensional: ",
src_tensor.shape().DebugString())));
}
OP_REQUIRES(context, scale_tensor.dims() == 1,
absl::InvalidArgumentError(
                      absl::StrCat("scale must be 1-dimensional: ",
scale_tensor.shape().DebugString())));
OP_REQUIRES(context, shift_tensor.dims() == 1,
absl::InvalidArgumentError(
                      absl::StrCat("offset must be 1-dimensional: ",
shift_tensor.shape().DebugString())));
OP_REQUIRES(context, est_mean_tensor.dims() == 1,
absl::InvalidArgumentError(
                      absl::StrCat("estimated_mean must be 1-dimensional: ",
est_mean_tensor.shape().DebugString())));
OP_REQUIRES(context, est_variance_tensor.dims() == 1,
absl::InvalidArgumentError(
                      absl::StrCat("estimated_variance must be 1-dimensional: ",
est_variance_tensor.shape().DebugString())));
int num_channels;
if (dnn_shape_src.IsMklTensor()) {
num_channels = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else {
num_channels = GetTensorDim(src_tensor, tensor_format_, 'C');
}
OP_REQUIRES(context, scale_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"scale must have the same number of elements "
"as the channels of x, got ",
scale_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(context, shift_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"offset must have the same number of elements "
"as the channels of x, got ",
shift_tensor.NumElements(), " and ", num_channels)));
if (!is_training_ || exponential_avg_factor_ != 1.) {
std::string prefix_msg = is_training_
? "When exponential_avg_factor != 1"
: "When is_training=false";
OP_REQUIRES(context, est_mean_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
prefix_msg,
", mean must have the same number "
"of elements as the channels of x, got ",
est_mean_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(
context, est_variance_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
prefix_msg,
", variance must have the same "
"number of elements as the channels of x, got ",
est_variance_tensor.NumElements(), " and ", num_channels)));
}
Tensor* dst_tensor = nullptr;
TensorShape workspace_tf_shape;
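      // Nothing to compute for an empty input; allocate outputs and return.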
if (tf_shape_src.num_elements() == 0) {
size_t workspace_bytes = 0;
workspace_tf_shape.AddDim(workspace_bytes);
HandleEmptyInput(context, tf_shape_src, workspace_tf_shape,
scale_tensor.shape(), &dst_tensor);
return;
}
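      // The channel count (depth) comes from the MKL shape when the input is
      // in a blocked layout, otherwise from the TF data_format attribute.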
if (dnn_shape_src.IsMklTensor())
depth_ = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
else
ExtractParams(context);
const size_t kDstIndex = 0;
Tensor* batch_mean_tensor = nullptr;
Tensor* batch_variance_tensor = nullptr;
Tensor* saved_mean_tensor = nullptr;
Tensor* saved_variance_tensor = nullptr;
Tensor* reserved_space_tensor = nullptr;
MklDnnData<T> src(&cpu_engine_);
#ifndef ENABLE_ONEDNN_V3
MklDnnData<U> scale_shift(&cpu_engine_);
#else
MklDnnData<U> scale(&cpu_engine_);
MklDnnData<U> shift(&cpu_engine_);
#endif
MklDnnData<U> wksp(&cpu_engine_);
memory::format_tag dnn_fmt;
MklTensorFormat mkl_tensor_fmt;
if (dnn_shape_src.IsMklTensor()) {
if (dnn_shape_src.IsTensorInNCHWFormat()) {
dnn_fmt = memory::format_tag::nchw;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NCHW;
} else {
dnn_fmt = memory::format_tag::nhwc;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NHWC;
}
} else {
mkl_tensor_fmt = TFDataFormatToMklDnnDataFormat(tensor_format_);
dnn_fmt = MklTensorFormatToMklDnnDataFormat(mkl_tensor_fmt);
}
memory::dims src_dims =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(src_tensor.shape(), tensor_format_);
auto src_md = dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetMklLayout()
: memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
#ifdef ENABLE_ONEDNN_V3
auto dst_md = memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
#endif
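      // Bundle the primitive parameters and look up (or create) a cached
      // oneDNN forward batch-norm primitive for this shape/format combination.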
MklBatchNormFwdParams fwdParams(src_dims, depth_, epsilon_, is_training_,
#ifndef ENABLE_ONEDNN_V3
tensor_format_, src_md, activation_mode_);
#else
tensor_format_, src_md, dst_md,
activation_mode_);
#endif
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklFusedBatchNormFwdPrimitive<T, U>* bn_fwd =
MklFusedBatchNormFwdPrimitiveFactory<T, U>::Get(fwdParams);
U* ws_data = nullptr;
if (fwdParams.activation_mode == FusedBNActivationMode::kRelu) {
memory::desc workspace_md =
bn_fwd->GetBatchNormFwdPd()->workspace_desc();
size_t workspace_bytes = workspace_md.get_size();
workspace_tf_shape.AddDim(workspace_bytes);
AllocateTFOutputs(context, scale_tensor.shape(), workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
if (reserved_space) {
wksp.SetUsrMem(workspace_md, reserved_space_tensor);
ws_data = static_cast<U*>(wksp.GetOpMem().get_data_handle());
}
} else {
size_t workspace_bytes = 0;
workspace_tf_shape.AddDim(workspace_bytes);
AllocateTFOutputs(context, scale_tensor.shape(), workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
}
if (is_training_)
SetMeanVariance(*batch_mean_tensor, *batch_variance_tensor);
else
SetMeanVariance(est_mean_tensor, est_variance_tensor);
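      // oneDNN v2 expects scale and shift packed into one buffer of length
      // 2 * depth_; oneDNN v3 takes them as two separate buffers.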
#ifndef ENABLE_ONEDNN_V3
scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
U* scale_shift_data =
reinterpret_cast<U*>(scale_shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
const U* shift_tf = shift_tensor.flat<U>().data();
std::memcpy(scale_shift_data, scale_tf, depth_ * sizeof(U));
std::memcpy(scale_shift_data + depth_, shift_tf, depth_ * sizeof(U));
#else
scale.AllocateBuffer(depth_ * sizeof(U));
U* scale_data = reinterpret_cast<U*>(scale.GetAllocatedBuffer());
shift.AllocateBuffer(depth_ * sizeof(U));
U* shift_data = reinterpret_cast<U*>(shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
const U* shift_tf = shift_tensor.flat<U>().data();
std::memcpy(scale_data, scale_tf, depth_ * sizeof(U));
std::memcpy(shift_data, shift_tf, depth_ * sizeof(U));
#endif
char* saved_mean_data_tf =
reinterpret_cast<char*>(saved_mean_tensor->flat<U>().data());
std::memcpy(saved_mean_data_tf, reinterpret_cast<char*>(mean_values_),
depth_ * sizeof(U));
char* saved_variance_data_tf =
reinterpret_cast<char*>(saved_variance_tensor->flat<U>().data());
std::memcpy(saved_variance_data_tf,
reinterpret_cast<char*>(variance_values_),
depth_ * sizeof(U));
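      // Reorder the input into the layout the primitive expects, if needed.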
const T* src_data = nullptr;
std::shared_ptr<BatchNormFwdPd> bn_fwd_pd = bn_fwd->GetBatchNormFwdPd();
if (!native_format && src_md != bn_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(bn_fwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));
}
MklDnnShape dnn_shape_dst;
TensorShape tf_shape_dst;
dnn_shape_dst.SetMklTensor(true);
auto dst_pd = bn_fwd->GetDstPd();
dnn_shape_dst.SET_MKL_LAYOUT(dst_pd);
dnn_shape_dst.SetElemType(MklDnnType<T>());
auto ndims = dnn_shape_src.IsMklTensor() ? dnn_shape_src.GetDimension()
: src_tensor.shape().dims();
dnn_shape_dst.SetTfLayout(ndims, src_dims, mkl_tensor_fmt);
tf_shape_dst.AddDim(dst_pd.get_size() / sizeof(T));
if (native_format) {
tf_shape_dst = dnn_shape_dst.GetTfShape();
}
AllocateOutputSetMklShape(context, kDstIndex, &dst_tensor, tf_shape_dst,
dnn_shape_dst, native_format);
#ifndef ENABLE_ONEDNN_V3
U* scale_shift_op_data = scale_shift_data;
#else
U* scale_op_data = scale_data;
U* shift_op_data = shift_data;
#endif
U* mean_op_data = saved_mean_tensor->flat<U>().data();
U* variance_op_data = saved_variance_tensor->flat<U>().data();
T* dst_data = dst_tensor->flat<T>().data();
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, bn_fwd->GetEngine()));
#ifndef ENABLE_ONEDNN_V3
bn_fwd->Execute(src_data, scale_shift_op_data, dst_data, mean_op_data,
#else
bn_fwd->Execute(src_data, scale_op_data, shift_op_data, dst_data,
mean_op_data,
#endif
variance_op_data, fwd_cpu_stream, ws_data);
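      // In training mode the variance is rescaled by N / (N - 1) (Bessel's
      // correction), where N = batch * height * width, before it is written
      // to batch_variance; e.g. N = 100 gives adjust_factor = 100 / 99 ≈ 1.0101.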
float adjust_factor = 1.0;
if (is_training_) {
size_t orig_size = src_dims[0] * src_dims[2] * src_dims[3];
size_t adjust_size = (orig_size > 1) ? (orig_size - 1) : 1;
adjust_factor = (static_cast<float>(orig_size)) / adjust_size;
}
auto mean_data = reinterpret_cast<U*>(saved_mean_data_tf);
auto variance_data = reinterpret_cast<U*>(saved_variance_data_tf);
auto batch_mean_data = batch_mean_tensor->flat<U>().data();
auto batch_variance_data = batch_variance_tensor->flat<U>().data();
auto est_mean_data = est_mean_tensor.flat<U>().data();
auto est_variance_data = est_variance_tensor.flat<U>().data();
if (is_training_) {
if (exponential_avg_factor_ == U(1.0)) {
for (int k = 0; k < depth_; k++) {
batch_mean_data[k] = mean_data[k];
batch_variance_data[k] =
static_cast<U>(adjust_factor) * variance_data[k];
}
} else {
U one_minus_factor = U(1.0) - exponential_avg_factor_;
for (int k = 0; k < depth_; k++) {
batch_mean_data[k] = one_minus_factor * est_mean_data[k] +
exponential_avg_factor_ * mean_data[k];
batch_variance_data[k] = one_minus_factor * est_variance_data[k] +
exponential_avg_factor_ *
static_cast<U>(adjust_factor) *
variance_data[k];
}
}
} else {
std::memcpy(batch_mean_data, mean_data, depth_ * sizeof(U));
std::memcpy(batch_variance_data, variance_data, depth_ * sizeof(U));
}
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
private:
float epsilon_;
U exponential_avg_factor_;
TensorFormat tensor_format_;
bool is_training_;
U* mean_values_;
U* variance_values_;
size_t depth_;
FusedBNActivationMode activation_mode_;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
void ExtractParams(OpKernelContext* context) {
const Tensor& input = MklGetInput(context, 0);
depth_ = static_cast<int>(GetTensorDim(input, tensor_format_, 'C'));
}
void SetMeanVariance(const Tensor& mean, const Tensor& variance) {
mean_values_ = reinterpret_cast<U*>(const_cast<U*>(mean.flat<U>().data()));
variance_values_ =
reinterpret_cast<U*>(const_cast<U*>(variance.flat<U>().data()));
}
void HandleEmptyInput(OpKernelContext* context, TensorShape tf_shape_src,
TensorShape workspace_tf_shape,
TensorShape tf_shape_scale, Tensor** dst_tensor) {
DCHECK(dst_tensor);
const size_t kDstIndex = 0;
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDstIndex, dst_tensor, tf_shape_src,
dnn_shape_dst, native_format);
DCHECK(*dst_tensor);
memset(const_cast<char*>((*dst_tensor)->tensor_data().data()), 0,
(*dst_tensor)->tensor_data().size());
Tensor* batch_mean_tensor = nullptr;
Tensor* batch_variance_tensor = nullptr;
Tensor* saved_mean_tensor = nullptr;
Tensor* saved_variance_tensor = nullptr;
Tensor* reserved_space_tensor = nullptr;
AllocateTFOutputs(context, tf_shape_scale, workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
}
void AllocateTFOutputs(OpKernelContext* context, TensorShape tf_shape_scale,
TensorShape workspace_tf_shape,
Tensor** batch_mean_tensor,
Tensor** batch_variance_tensor,
Tensor** saved_mean_tensor,
Tensor** saved_variance_tensor,
Tensor** reserved_space_tensor) {
DCHECK(batch_mean_tensor);
DCHECK(batch_variance_tensor);
DCHECK(saved_mean_tensor);
DCHECK(saved_variance_tensor);
const size_t kBatchMeanIndex = 1;
const size_t kBatchVarianceIndex = 2;
const size_t kSavedMeanIndex = 3;
const size_t kSavedVarianceIndex = 4;
const size_t kReservedSpaceIndex = 5;
MklDnnShape mkl_shape_batch_mean;
mkl_shape_batch_mean.SetMklTensor(false);
AllocateOutputSetMklShape(context, kBatchMeanIndex, batch_mean_tensor,
tf_shape_scale, mkl_shape_batch_mean,
native_format);
DCHECK(*batch_mean_tensor);
int num_elements = tf_shape_scale.num_elements();
auto batch_mean_data = (*batch_mean_tensor)->flat<U>().data();
std::fill_n(batch_mean_data, num_elements, static_cast<U>(NAN));
MklDnnShape mkl_shape_batch_variance;
mkl_shape_batch_variance.SetMklTensor(false);
AllocateOutputSetMklShape(context, kBatchVarianceIndex,
batch_variance_tensor, tf_shape_scale,
mkl_shape_batch_variance, native_format);
DCHECK(*batch_variance_tensor);
auto batch_variance_data = (*batch_variance_tensor)->flat<U>().data();
std::fill_n(batch_variance_data, num_elements, static_cast<U>(NAN));
MklDnnShape mkl_shape_saved_mean;
mkl_shape_saved_mean.SetMklTensor(false);
AllocateOutputSetMklShape(context, kSavedMeanIndex, saved_mean_tensor,
tf_shape_scale, mkl_shape_saved_mean,
native_format);
DCHECK(*saved_mean_tensor);
auto saved_mean_data = (*saved_mean_tensor)->flat<U>().data();
std::fill_n(saved_mean_data, num_elements, static_cast<U>(0));
MklDnnShape mkl_shape_saved_variance;
mkl_shape_saved_variance.SetMklTensor(false);
AllocateOutputSetMklShape(context, kSavedVarianceIndex,
saved_variance_tensor, tf_shape_scale,
mkl_shape_saved_variance, native_format);
DCHECK(*saved_variance_tensor);
auto saved_variance_data = (*saved_variance_tensor)->flat<U>().data();
std::fill_n(saved_variance_data, num_elements, static_cast<U>(0));
if (reserved_space) {
DCHECK(reserved_space_tensor != nullptr);
MklDnnShape mkl_shape_reserved_space;
mkl_shape_reserved_space.SetMklTensor(false);
AllocateOutputSetMklShape(context, kReservedSpaceIndex,
reserved_space_tensor, workspace_tf_shape,
mkl_shape_reserved_space, native_format);
DCHECK((*reserved_space_tensor) != nullptr);
}
}
};
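// Gradient kernel: consumes y_backprop together with the mean and variance
// saved by the forward pass and produces x_backprop, scale_backprop and
// offset_backprop.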
template <typename Device, typename T, typename U, bool reserved_space,
bool native_format = false>
class MklFusedBatchNormGradOp : public OpKernel {
public:
explicit MklFusedBatchNormGradOp(OpKernelConstruction* context)
: OpKernel(context) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
epsilon_ = epsilon;
string tensor_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
depth_ = 0;
}
void Compute(OpKernelContext* context) override {
try {
const size_t kDiffDstIndex = 0;
const size_t kSrcIndex = 1;
const size_t kScaleIndex = 2;
const size_t kMeanIndex = 3;
const size_t kVarianceIndex = 4;
const size_t kReservedSpaceIndex = 5;
const Tensor& diff_dst_tensor = MklGetInput(context, kDiffDstIndex);
const Tensor& src_tensor = MklGetInput(context, kSrcIndex);
const Tensor& scale_tensor = MklGetInput(context, kScaleIndex);
const Tensor& saved_mean_tensor = MklGetInput(context, kMeanIndex);
const Tensor& saved_variance_tensor =
MklGetInput(context, kVarianceIndex);
const Tensor& reserved_space_tensor =
(reserved_space) ? MklGetInput(context, kReservedSpaceIndex)
: Tensor();
MklDnnShape dnn_shape_src, dnn_shape_diff_dst;
GetMklShape(context, kSrcIndex, &dnn_shape_src, native_format);
GetMklShape(context, kDiffDstIndex, &dnn_shape_diff_dst, native_format);
TensorShape tf_shape_src, tf_shape_diff_dst;
if (dnn_shape_diff_dst.IsMklTensor()) {
tf_shape_diff_dst = dnn_shape_diff_dst.GetTfShape();
OP_REQUIRES(context, dnn_shape_diff_dst.GetDimension() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
diff_dst_tensor.shape().DebugString())));
} else {
tf_shape_diff_dst = diff_dst_tensor.shape();
OP_REQUIRES(context, diff_dst_tensor.dims() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
diff_dst_tensor.shape().DebugString())));
}
if (dnn_shape_src.IsMklTensor()) {
tf_shape_src = dnn_shape_src.GetTfShape();
OP_REQUIRES(context, dnn_shape_src.GetDimension() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
} else {
tf_shape_src = src_tensor.shape();
OP_REQUIRES(context, src_tensor.dims() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
}
OP_REQUIRES(context, scale_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("scale must be 1-dimensional",
scale_tensor.shape().DebugString())));
OP_REQUIRES(context, saved_mean_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("saved mean must be 1-dimensional",
saved_mean_tensor.shape().DebugString())));
OP_REQUIRES(context, saved_variance_tensor.dims() == 1,
absl::InvalidArgumentError(absl::StrCat(
"saved variance must be 1-dimensional",
saved_variance_tensor.shape().DebugString())));
OP_REQUIRES(
context, tf_shape_src == tf_shape_diff_dst,
absl::InvalidArgumentError(absl::StrCat(
"x and y_backprop must have same shape, but x has shape ",
src_tensor.shape().DebugString(), " and y_backprop has shape ",
diff_dst_tensor.shape().DebugString())));
int num_channels;
if (dnn_shape_src.IsMklTensor()) {
num_channels = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else {
num_channels = GetTensorDim(src_tensor, tensor_format_, 'C');
}
OP_REQUIRES(context, scale_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"scale must have the same number of elements "
"as the channels of x, got ",
scale_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(context, saved_mean_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"reserve_space_1 must have the same number of "
"elements as the channels of x, got ",
saved_mean_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(
context, saved_variance_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"reserve_space_2 must have the same number of "
"elements as the channels of x, got ",
saved_variance_tensor.NumElements(), " and ", num_channels)));
Tensor* diff_src_tensor = nullptr;
if (tf_shape_src.num_elements() == 0 ||
tf_shape_diff_dst.num_elements() == 0) {
HandleEmptyInput(context, tf_shape_src, scale_tensor.shape(),
&diff_src_tensor);
return;
}
if (dnn_shape_src.IsMklTensor()) {
depth_ = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else if (dnn_shape_diff_dst.IsMklTensor()) {
depth_ = dnn_shape_diff_dst.DimSize(MklDnnDims::Dim_C);
} else {
ExtractParams(context);
}
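      // Pick the memory format tag the same way the forward kernel does,
      // based on whether the source tensor arrived in an MKL blocked layout.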
memory::format_tag dnn_fmt;
MklTensorFormat mkl_tensor_fmt;
if (dnn_shape_src.IsMklTensor()) {
if (dnn_shape_src.IsTensorInNCHWFormat()) {
dnn_fmt = memory::format_tag::nchw;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NCHW;
} else {
dnn_fmt = memory::format_tag::nhwc;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NHWC;
}
} else {
mkl_tensor_fmt = TFDataFormatToMklDnnDataFormat(tensor_format_);
dnn_fmt = MklTensorFormatToMklDnnDataFormat(mkl_tensor_fmt);
}
MklDnnData<T> src(&cpu_engine_);
MklDnnData<T> diff_dst(&cpu_engine_);
#ifndef ENABLE_ONEDNN_V3
MklDnnData<U> scale_shift(&cpu_engine_);
MklDnnData<U> diff_scale_shift(&cpu_engine_);
#else
MklDnnData<U> scale(&cpu_engine_);
MklDnnData<U> diff_scale(&cpu_engine_);
MklDnnData<U> diff_shift(&cpu_engine_);
#endif
memory::dims src_dims =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(src_tensor.shape(), tensor_format_);
memory::dims diff_dst_dims =
dnn_shape_diff_dst.IsMklTensor()
? dnn_shape_diff_dst.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(diff_dst_tensor.shape(),
tensor_format_);
memory::desc src_md =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetMklLayout()
: memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
memory::desc diff_dst_md =
dnn_shape_diff_dst.IsMklTensor()
? dnn_shape_diff_dst.GetMklLayout()
: memory::desc(diff_dst_dims, MklDnnType<T>(), dnn_fmt);
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md = memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
memory::desc diff_src_md =
memory::desc(diff_dst_dims, MklDnnType<T>(), dnn_fmt);
#endif
MklDnnData<T> reorder_src(&cpu_engine_);
MklDnnData<T> reorder_diff_dst(&cpu_engine_);
T* diff_dst_data =
static_cast<T*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
T* src_data =
static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));
if (!native_format) {
if (dnn_shape_src.IsMklTensor() && !dnn_shape_diff_dst.IsMklTensor()) {
reorder_diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
reorder_diff_dst.CheckReorderToOpMem(src_md, cpu_engine_, context);
diff_dst_md = src_md;
diff_dst_data =
static_cast<T*>(reorder_diff_dst.GetOpMem().get_data_handle());
} else if (!dnn_shape_src.IsMklTensor() &&
dnn_shape_diff_dst.IsMklTensor()) {
reorder_src.SetUsrMem(src_md, &src_tensor);
reorder_src.CheckReorderToOpMem(diff_dst_md, cpu_engine_, context);
src_md = diff_dst_md;
src_data = static_cast<T*>(reorder_src.GetOpMem().get_data_handle());
}
}
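      // Stage the scale (plus a zero shift for oneDNN v2) in temporary
      // buffers and allocate space for the diffs the primitive will produce.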
#ifndef ENABLE_ONEDNN_V3
scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
U* scale_shift_data_tf =
reinterpret_cast<U*>(scale_shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
for (int k = 0; k < depth_; k++) {
scale_shift_data_tf[k] = scale_tf[k];
scale_shift_data_tf[k + depth_] = static_cast<U>(0);
}
diff_scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
#else
scale.AllocateBuffer(depth_ * sizeof(U));
U* scale_data_tf = reinterpret_cast<U*>(scale.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
std::memcpy(scale_data_tf, scale_tf, depth_ * sizeof(U));
diff_scale.AllocateBuffer(depth_ * sizeof(U));
diff_shift.AllocateBuffer(depth_ * sizeof(U));
#endif
MklBatchNormBwdParams bwdParams(src_dims, diff_dst_dims, depth_, epsilon_,
is_training_, tensor_format_, src_md,
#ifdef ENABLE_ONEDNN_V3
dst_md, diff_src_md,
#endif
diff_dst_md);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklFusedBatchNormBwdPrimitive<T, U>* bn_bwd =
MklFusedBatchNormBwdPrimitiveFactory<T, U>::Get(bwdParams);
std::shared_ptr<BatchNormBwdPd> bn_bwd_pd = bn_bwd->GetBatchNormBwdPd();
if (!native_format && diff_dst_md != bn_bwd_pd->diff_dst_desc()) {
diff_dst.SetUsrMem(diff_dst_md, diff_dst_data);
diff_dst.CheckReorderToOpMem(bn_bwd_pd->diff_dst_desc(), cpu_engine_,
context);
diff_dst_data = static_cast<T*>(diff_dst.GetOpMem().get_data_handle());
}
if (!native_format && (src_md != bn_bwd_pd->src_desc())) {
src.SetUsrMem(src_md, src_data);
src.CheckReorderToOpMem(bn_bwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
}
const size_t kDiffSrcIndex = 0;
MklDnnShape dnn_shape_diff_src;
TensorShape tf_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(true);
auto diff_src_pd = bn_bwd->GetDiffSrcPd();
dnn_shape_diff_src.SET_MKL_LAYOUT(diff_src_pd);
dnn_shape_diff_src.SetElemType(MklDnnType<T>());
dnn_shape_diff_src.SetTfLayout(src_dims.size(), src_dims, mkl_tensor_fmt);
dnn_shape_diff_src.SetTfDimOrder(src_dims.size(), tensor_format_);
tf_shape_diff_src.AddDim(diff_src_pd.get_size() / sizeof(T));
if (native_format) {
tf_shape_diff_src = dnn_shape_diff_src.GetTfShape();
}
AllocateOutputSetMklShape(context, kDiffSrcIndex, &diff_src_tensor,
tf_shape_diff_src, dnn_shape_diff_src,
native_format);
U* mean_data =
static_cast<U*>(const_cast<U*>(saved_mean_tensor.flat<U>().data()));
U* variance_data = static_cast<U*>(
const_cast<U*>(saved_variance_tensor.flat<U>().data()));
#ifndef ENABLE_ONEDNN_V3
U* scale_shift_data = scale_shift_data_tf;
U* diff_scale_shift_data =
static_cast<U*>(diff_scale_shift.GetAllocatedBuffer());
#else
U* scale_data = scale_data_tf;
U* diff_scale_data = static_cast<U*>(diff_scale.GetAllocatedBuffer());
U* diff_shift_data = static_cast<U*>(diff_shift.GetAllocatedBuffer());
#endif
T* diff_src_data = static_cast<T*>(diff_src_tensor->flat<T>().data());
U* res_space_data =
((reserved_space) ? static_cast<U*>(const_cast<U*>(
reserved_space_tensor.flat<U>().data()))
: nullptr);
std::shared_ptr<stream> bwd_cpu_stream;
bwd_cpu_stream.reset(CreateStream(&eigen_tp, bn_bwd->GetEngine()));
bn_bwd->Execute(src_data, mean_data, variance_data, diff_dst_data,
GET_SCALE_DATA_BUFFER, diff_src_data,
GET_DIFF_SCALE_SHIFT_DATA_BUFFERS, res_space_data,
bwd_cpu_stream);
Tensor* diff_scale_tensor = nullptr;
Tensor* diff_shift_tensor = nullptr;
AllocateTFOutputs(context, scale_tensor.shape(), &diff_scale_tensor,
&diff_shift_tensor);
auto diff_scale_data_out = diff_scale_tensor->flat<U>().data();
auto diff_shift_data_out = diff_shift_tensor->flat<U>().data();
std::memcpy(reinterpret_cast<char*>(diff_scale_data_out),
reinterpret_cast<char*>(GET_DIFF_SCALE_DATA_BUFFER),
depth_ * sizeof(U));
std::memcpy(reinterpret_cast<char*>(diff_shift_data_out),
reinterpret_cast<char*>(GET_DIFF_SHIFT_DATA_BUFFER),
depth_ * sizeof(U));
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
private:
float epsilon_;
TensorFormat tensor_format_;
size_t depth_;
bool is_training_;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
void ExtractParams(OpKernelContext* context) {
const Tensor& input = MklGetInput(context, 0);
depth_ = static_cast<int>(GetTensorDim(input, tensor_format_, 'C'));
}
void HandleEmptyInput(OpKernelContext* context, TensorShape tf_shape_src,
TensorShape tf_shape_scale_shift,
Tensor** diff_src_tensor) {
const size_t kDiffSrcIndex = 0;
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffSrcIndex, diff_src_tensor,
tf_shape_src, dnn_shape_diff_src, native_format);
auto diff_src_data = (*diff_src_tensor)->flat<T>().data();
std::fill_n(diff_src_data, (*diff_src_tensor)->shape().num_elements(),
static_cast<T>(0));
Tensor* diff_scale_tensor = nullptr;
Tensor* diff_shift_tensor = nullptr;
AllocateTFOutputs(context, tf_shape_scale_shift, &diff_scale_tensor,
&diff_shift_tensor);
}
void AllocateTFOutputs(OpKernelContext* context,
TensorShape tf_shape_scale_shift,
Tensor** diff_scale_tensor,
Tensor** diff_shift_tensor) {
DCHECK(diff_scale_tensor);
DCHECK(diff_shift_tensor);
const size_t kDiffScaleIndex = 1;
const size_t kDiffShiftIndex = 2;
const size_t kP1Index = 3;
const size_t kP2Index = 4;
MklDnnShape mkl_shape_diff_scale;
mkl_shape_diff_scale.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffScaleIndex, diff_scale_tensor,
tf_shape_scale_shift, mkl_shape_diff_scale,
native_format);
DCHECK(*diff_scale_tensor);
auto diff_scale_data = (*diff_scale_tensor)->flat<U>().data();
std::fill_n(diff_scale_data, (*diff_scale_tensor)->shape().num_elements(),
static_cast<U>(0));
MklDnnShape mkl_shape_diff_shift;
mkl_shape_diff_shift.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffShiftIndex, diff_shift_tensor,
tf_shape_scale_shift, mkl_shape_diff_shift,
native_format);
DCHECK(*diff_shift_tensor);
auto diff_shift_data = (*diff_shift_tensor)->flat<U>().data();
std::fill_n(diff_shift_data, (*diff_shift_tensor)->shape().num_elements(),
static_cast<U>(0));
Tensor *p1_tensor = nullptr, *p2_tensor = nullptr;
MklDnnShape mkl_shape_p;
mkl_shape_p.SetMklTensor(false);
AllocateOutputSetMklShape(context, kP1Index, &p1_tensor, TensorShape({}),
mkl_shape_p, native_format);
std::fill_n(p1_tensor->flat<U>().data(), p1_tensor->shape().num_elements(),
static_cast<U>(0));
AllocateOutputSetMklShape(context, kP2Index, &p2_tensor, TensorShape({}),
mkl_shape_p, native_format);
std::fill_n(p2_tensor->flat<U>().data(), p2_tensor->shape().num_elements(),
static_cast<U>(0));
}
memory::dims GetMeanVarianceDims() { return memory::dims({1, depth_}); }
};
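// Kernel registrations. Each macro registers both the layout-dependent
// (_Mkl*) variant and the native-format (_MklNative*) variant of the op.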
#define REGISTER_MKL_FUSED_BATCHNORM_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNorm") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, T, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNorm") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, T, false, false, true>);
TF_CALL_float(REGISTER_MKL_FUSED_BATCHNORM_CPU);
TF_CALL_bfloat16(REGISTER_MKL_FUSED_BATCHNORM_CPU);
TF_CALL_half(REGISTER_MKL_FUSED_BATCHNORM_CPU);
#undef REGISTER_MKL_FUSED_BATCHNORM_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, false, false, true>);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_V2_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, T, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, T, false, true>);
TF_CALL_float(REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU);
TF_CALL_bfloat16(REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGradV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGradV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, false, true>);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormEx") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormEx") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, true, true>);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_V3_CPU
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
.Device(DEVICE_CPU)
.TypeConstraint<float>("T")
.TypeConstraint<float>("U"),
NoOp);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
.Device(DEVICE_CPU)
.TypeConstraint<bfloat16>("T")
.TypeConstraint<float>("U"),
NoOp);
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGradV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGradV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, true, true>);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(bfloat16, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU
}
#undef FORWARD_INFERENCE
#undef GET_DIFF_SCALE_DATA_BUFFER
#undef GET_DIFF_SCALE_SHIFT_DATA_BUFFERS
#undef GET_DIFF_SHIFT_DATA_BUFFER
#undef GET_SCALE_AND_SHIFT_FLAGS
#undef GET_SCALE_DATA_BUFFER
#undef IS_SCALE_AND_SHIFT_FLAG_SET
#undef SCALE_SHIFT_NET_ARGS
#undef SET_MKL_LAYOUT
#endif | #ifdef INTEL_MKL
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
static const uint8 dummy_tensor[] = {0, 0, 0, 0, 0, 0, 0, 0};
static const TensorShape dummy_shape({8});
using GraphRunner = std::function<void(
const Tensor& input, const Tensor& scale, const Tensor& offset,
const Tensor& mean, const Tensor& variance,
const float exponential_avg_factor, const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var)>;
using GraphRunnerGrad = std::function<void(
const Tensor& input, const Tensor& filter, const Tensor& y_backprop,
const Tensor& scale, const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* output, Tensor* scale_backprop,
    Tensor* offset_backprop, const float epsilon)>;
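// CommonTestUtilities builds random inputs, runs both graph runners (the
// reference TF kernel and the MKL kernel), and checks that outputs match.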
template <typename T>
class CommonTestUtilities : public OpsTestBase {
public:
void TestBody() {}
static void VerifyTensorsClose(const float exponential_avg_factor,
const bool is_training, const GraphRunner& run,
const GraphRunner& run_mkl) {
int batch = 1;
int height = 10;
int width = 10;
int depth = 3;
DataType dtype = DataTypeToEnum<T>::v();
Tensor input(dtype, {batch, height, width, depth});
input.flat<T>() = input.flat<T>().template setRandom<random_gen_>();
Tensor scale(dtype, {depth});
scale.flat<T>() = scale.flat<T>().template setRandom<random_gen_>();
Tensor offset(dtype, {depth});
offset.flat<T>() = offset.flat<T>().template setRandom<random_gen_>();
if (is_training && (exponential_avg_factor == 1.0)) {
depth = 0;
}
Tensor mean(dtype, {depth});
mean.flat<T>() = mean.flat<T>().template setRandom<random_gen_>();
Tensor variance(dtype, {depth});
variance.flat<T>() =
variance.flat<T>().template setRandom<random_gen_>().abs();
Tensor output;
Tensor batch_mean;
Tensor batch_var;
Tensor mkl_output;
Tensor mkl_batch_mean;
Tensor mkl_batch_var;
run(input, scale, offset, mean, variance, exponential_avg_factor,
is_training, &output, &batch_mean, &batch_var);
run_mkl(input, scale, offset, mean, variance, exponential_avg_factor,
is_training, &mkl_output, &mkl_batch_mean, &mkl_batch_var);
ASSERT_EQ(output.dtype(), mkl_output.dtype());
ASSERT_EQ(output.shape(), mkl_output.shape());
ASSERT_EQ(batch_mean.dtype(), mkl_batch_mean.dtype());
ASSERT_EQ(batch_mean.shape(), mkl_batch_mean.shape());
ASSERT_EQ(batch_var.dtype(), mkl_batch_var.dtype());
ASSERT_EQ(batch_var.shape(), mkl_batch_var.shape());
test::ExpectClose(output, mkl_output, 1e-5);
test::ExpectClose(batch_mean, mkl_batch_mean, 1e-5);
test::ExpectClose(batch_var, mkl_batch_var, 1e-5);
}
static void VerifyTensorsCloseForGrad(const float epsilon,
const GraphRunnerGrad& run,
const GraphRunnerGrad& run_mkl) {
int batch = 2;
int height = 8;
int width = 8;
int depth = 1;
int filter_height = 3;
int filter_width = 3;
int in_channels = 1;
int out_channels = 6;
DataType dtype = DataTypeToEnum<T>::v();
Tensor input(dtype, {batch, height, width, depth});
input.flat<T>() = input.flat<T>().template setRandom<random_gen_>();
Tensor filter(dtype,
{filter_height, filter_width, in_channels, out_channels});
filter.flat<T>() = filter.flat<T>().template setRandom<random_gen_>();
Tensor y_backprop(dtype, {batch, height, width, out_channels});
y_backprop.flat<T>() =
y_backprop.flat<T>().template setRandom<random_gen_>();
Tensor scale(dtype, {out_channels});
scale.flat<T>() = scale.flat<T>().template setRandom<random_gen_>();
Tensor mean(dtype, {out_channels});
mean.flat<T>() = mean.flat<T>().template setRandom<random_gen_>();
Tensor variance(dtype, {out_channels});
variance.flat<T>() =
variance.flat<T>().template setRandom<random_gen_>().abs();
Tensor res_sp3(dtype, {out_channels});
res_sp3.flat<T>() =
res_sp3.flat<T>().template setRandom<random_gen_>().abs();
Tensor output;
Tensor scale_backprop;
Tensor offset_backprop;
Tensor mkl_output;
Tensor mkl_scale_backprop;
Tensor mkl_offset_backprop;
run(input, filter, y_backprop, scale, mean, variance, res_sp3, &output,
&scale_backprop, &offset_backprop, epsilon);
run_mkl(input, filter, y_backprop, scale, mean, variance, res_sp3,
&mkl_output, &mkl_scale_backprop, &mkl_offset_backprop, epsilon);
ASSERT_EQ(output.dtype(), mkl_output.dtype());
ASSERT_EQ(output.shape(), mkl_output.shape());
ASSERT_EQ(scale_backprop.dtype(), mkl_scale_backprop.dtype());
ASSERT_EQ(scale_backprop.shape(), mkl_scale_backprop.shape());
ASSERT_EQ(offset_backprop.dtype(), mkl_offset_backprop.dtype());
ASSERT_EQ(offset_backprop.shape(), mkl_offset_backprop.shape());
test::ExpectClose(output, mkl_output, 1e-5);
test::ExpectClose(scale_backprop, mkl_scale_backprop, 1e-5,
1e-5);
test::ExpectClose(offset_backprop, mkl_offset_backprop, 1e-5);
}
private:
using random_gen_ = Eigen::internal::NormalRandomGenerator<T>;
};
template <typename T>
class Conv2DOpTest : public OpsTestBase {
void TestBody() {}
public:
void RunConv2D(const Tensor& input, const Tensor& filter, Tensor* output) {
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(NodeDefBuilder("MklConv2D", "_MklNativeConv2D")
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("data_format", "NHWC")
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(input.shape(), input.flat<T>());
AddInputFromArray<T>(filter.shape(), filter.flat<T>());
TF_ASSERT_OK(RunOpKernel());
*output = *GetOutput(0);
}
};
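// The typed fixture below runs each case twice: once through a stock
// FusedBatchNorm graph and once through the _MklNative* kernel, and compares
// the results via CommonTestUtilities.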
template <typename T>
class FusedBatchNormOpTest : public OpsTestBase {
protected:
void VerifyFusedBatchNorm(const float exponential_avg_factor,
const bool is_training) {
const GraphRunner run = [this](const Tensor& input, const Tensor& scale,
const Tensor& offset, const Tensor& mean,
const Tensor& variance,
const float exponential_avg_factor,
const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var) {
auto root = tensorflow::Scope::NewRootScope();
auto input_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input));
auto scale_op =
ops::Const(root.WithOpName("scale"), Input::Initializer(scale));
auto offset_op =
ops::Const(root.WithOpName("offset"), Input::Initializer(offset));
auto mean_op =
ops::Const(root.WithOpName("mean"), Input::Initializer(mean));
auto var_op =
ops::Const(root.WithOpName("variance"), Input::Initializer(variance));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(is_training);
attr = attr.ExponentialAvgFactor(exponential_avg_factor);
attr = attr.Epsilon(0.001);
auto bn = ops::FusedBatchNorm(root.WithOpName("FusedBatchNorm"), input_op,
scale_op, offset_op, mean_op, var_op, attr);
auto y = ops::Identity(root.WithOpName("y"), bn.y);
auto y_batch_mean =
ops::Identity(root.WithOpName("y_batch_mean"), bn.batch_mean);
auto y_batch_var =
ops::Identity(root.WithOpName("y_batch_var"), bn.batch_variance);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(session->Run({}, {"y", "y_batch_mean", "y_batch_var"}, {},
&output_tensors));
*output = output_tensors[0];
*batch_mean = output_tensors[1];
*batch_var = output_tensors[2];
};
const GraphRunner run_mkl = [this](const Tensor& input, const Tensor& scale,
const Tensor& offset, const Tensor& mean,
const Tensor& variance,
const float exponential_avg_factor,
const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var) {
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(
NodeDefBuilder("MklNativeFusedBatchNorm", "_MklNativeFusedBatchNorm")
.Input(FakeInput(dtype))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("exponential_avg_factor", exponential_avg_factor)
.Attr("epsilon", 0.001)
.Attr("is_training", is_training)
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(input.shape(), input.flat<T>());
AddInputFromArray<float>(scale.shape(), scale.flat<float>());
AddInputFromArray<float>(offset.shape(), offset.flat<float>());
AddInputFromArray<float>(mean.shape(), mean.flat<float>());
AddInputFromArray<float>(variance.shape(), variance.flat<float>());
TF_ASSERT_OK(RunOpKernel());
*output = *GetOutput(0);
*batch_mean = *GetOutput(1);
*batch_var = *GetOutput(2);
};
CommonTestUtilities<T>::VerifyTensorsClose(exponential_avg_factor,
is_training, run, run_mkl);
}
void VerifyFusedBatchNormGradWithConv2D(const float epsilon) {
const GraphRunnerGrad run =
[this](const Tensor& input, const Tensor& filter,
const Tensor& y_backprop, const Tensor& scale,
const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* x_backprop_tensor,
Tensor* scale_backprop_tensor, Tensor* offset_backprop_tensor,
const float epsilon) {
auto root = tensorflow::Scope::NewRootScope();
auto input_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input));
auto filter_op =
ops::Const(root.WithOpName("filter"), Input::Initializer(filter));
ops::Conv2D::Attrs conv_attr;
conv_attr = conv_attr.DataFormat("NHWC");
auto conv = ops::Conv2D(root.WithOpName("Conv"), input_op, filter_op,
{1, 1, 1, 1}, "SAME", conv_attr);
auto y_backprop_op = ops::Const(root.WithOpName("y_backprop"),
Input::Initializer(y_backprop));
auto scale_op =
ops::Const(root.WithOpName("scale"), Input::Initializer(scale));
auto mean_op =
ops::Const(root.WithOpName("mean"), Input::Initializer(mean));
auto var_op = ops::Const(root.WithOpName("variance"),
Input::Initializer(variance));
auto res_sp3_op = ops::Const(root.WithOpName("reserve_space_3"),
Input::Initializer(res_sp3));
ops::FusedBatchNormGradV3::Attrs bn_attr;
bn_attr = bn_attr.IsTraining(true);
bn_attr = bn_attr.Epsilon(epsilon);
bn_attr = bn_attr.DataFormat("NHWC");
auto bn = ops::FusedBatchNormGradV3(
root.WithOpName("FusedBatchNormGrad"), y_backprop_op, conv,
scale_op, mean_op, var_op, res_sp3_op, bn_attr);
auto x_backprop =
ops::Identity(root.WithOpName("x_backprop"), bn.x_backprop);
auto scale_backprop = ops::Identity(root.WithOpName("scale_backprop"),
bn.scale_backprop);
auto offset_backprop = ops::Identity(
root.WithOpName("offset_backprop"), bn.offset_backprop);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
tensorflow::SessionOptions session_options;
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(session->Run(
{}, {"x_backprop", "scale_backprop", "offset_backprop"}, {},
&output_tensors));
*x_backprop_tensor = output_tensors[0];
*scale_backprop_tensor = output_tensors[1];
*offset_backprop_tensor = output_tensors[2];
};
const GraphRunnerGrad run_mkl =
[this](const Tensor& input, const Tensor& filter,
const Tensor& y_backprop, const Tensor& scale,
const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* x_backprop_tensor,
Tensor* scale_backprop_tensor, Tensor* offset_backprop_tensor,
const float epsilon) {
Tensor conv2d_output;
Conv2DOpTest<T> conv2d_test;
conv2d_test.RunConv2D(input, filter, &conv2d_output);
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(NodeDefBuilder("MklFusedBatchNorm",
"_MklNativeFusedBatchNormGradV3")
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("epsilon", epsilon)
.Attr("is_training", true)
.Attr("data_format", "NHWC")
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(y_backprop.shape(), y_backprop.flat<T>());
AddInputFromArray<T>(conv2d_output.shape(), conv2d_output.flat<T>());
AddInputFromArray<float>(scale.shape(), scale.flat<float>());
AddInputFromArray<float>(mean.shape(), mean.flat<float>());
AddInputFromArray<float>(variance.shape(), variance.flat<float>());
AddInputFromArray<float>(res_sp3.shape(), res_sp3.flat<float>());
TF_ASSERT_OK(RunOpKernel());
*x_backprop_tensor = *GetOutput(0);
*scale_backprop_tensor = *GetOutput(1);
*offset_backprop_tensor = *GetOutput(2);
};
CommonTestUtilities<T>::VerifyTensorsCloseForGrad(epsilon, run, run_mkl);
}
};
TYPED_TEST_SUITE_P(FusedBatchNormOpTest);
TYPED_TEST_P(FusedBatchNormOpTest, Training) {
const float exponential_avg_factor = 1.0;
const bool is_training = true;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, TrainingRunningMean) {
const float exponential_avg_factor = 0.5;
const bool is_training = true;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, Inference) {
const float exponential_avg_factor = 1.0;
const bool is_training = false;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, InferenceIgnoreAvgFactor) {
const float exponential_avg_factor = 0.5;
const bool is_training = false;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, FusedBatchNormGradV3) {
const float epsilon = 0.001;
this->VerifyFusedBatchNormGradWithConv2D(epsilon);
}
REGISTER_TYPED_TEST_SUITE_P(FusedBatchNormOpTest, Training, TrainingRunningMean,
Inference, InferenceIgnoreAvgFactor,
FusedBatchNormGradV3);
using FusedBatchNormDataTypes = ::testing::Types<float>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedBatchNormOpTest,
FusedBatchNormDataTypes);
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
026ad976-bcd8-45e2-a971-4ccda66bdadd | cpp | tensorflow/tensorflow | mkl_quantize_op | tensorflow/core/kernels/mkl/mkl_quantize_op.cc | tensorflow/core/kernels/mkl/mkl_quantize_op_test.cc | #ifdef INTEL_MKL
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::primitive_attr;
using dnnl::prop_kind;
using dnnl::reorder;
using dnnl::stream;
namespace {
enum {
QUANTIZE_MODE_MIN_COMBINED,
QUANTIZE_MODE_MIN_FIRST,
QUANTIZE_MODE_SCALED,
};
enum {
ROUND_HALF_AWAY_FROM_ZERO,
ROUND_HALF_TO_EVEN,
};
}
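// The enums above mirror the "mode" and "round_mode" attributes of the public
// QuantizeV2 op; the kernel below dispatches on them.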
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#else
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#endif
typedef Eigen::ThreadPoolDevice CPUDevice;
struct MklReorderWithScaleFwdParams {
memory::dims src_dims;
memory::desc src_md;
memory::desc dst_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc scale_md;
#endif
string dtypes = string("");
struct PostOpParam {
string name;
std::vector<float> param;
};
PostOpParam post_op_params;
#ifndef ENABLE_ONEDNN_V3
MklReorderWithScaleFwdParams(memory::dims src_dims, memory::desc src_md,
memory::desc dst_md)
: src_dims(src_dims), src_md(src_md), dst_md(dst_md) {}
#else
MklReorderWithScaleFwdParams(memory::dims src_dims, memory::desc src_md,
memory::desc dst_md, memory::desc scale_md)
: src_dims(src_dims),
src_md(src_md),
dst_md(dst_md),
scale_md(scale_md) {}
#endif
};
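// A cached oneDNN reorder primitive that applies a single scale while copying
// src to dst. With oneDNN v3 the scale is supplied at execution time as a
// runtime memory argument rather than baked into the primitive attributes.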
class MklReorderWithScalePrimitive : public MklPrimitive {
public:
explicit MklReorderWithScalePrimitive(
const MklReorderWithScaleFwdParams& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
Setup(fwdParams);
}
~MklReorderWithScalePrimitive() {}
std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }
void Execute(void* src_data, void* dst_data,
#ifdef ENABLE_ONEDNN_V3
void* scale_data,
#endif
std::shared_ptr<stream> reorder_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(src_data, *reorder_stream);
context_.dst_mem->set_data_handle(dst_data, *reorder_stream);
#else
context_.src_mem->set_data_handle(src_data);
context_.dst_mem->set_data_handle(dst_data);
#endif
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem->set_data_handle(scale_data);
#endif
context_.reorder_prim->execute(*reorder_stream, context_.prim_args);
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem->set_data_handle(DummyData);
#endif
}
private:
struct ReorderContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> dst_mem;
#ifdef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_mem;
#endif
std::shared_ptr<reorder::primitive_desc> reorder_pd;
std::shared_ptr<primitive> reorder_prim;
std::shared_ptr<dnnl::stream> reorder_stream;
std::unordered_map<int, dnnl::memory> prim_args;
ReorderContext()
: src_mem(nullptr),
dst_mem(nullptr),
#ifdef ENABLE_ONEDNN_V3
scale_mem(nullptr),
#endif
reorder_pd(nullptr),
reorder_prim(nullptr) {
}
} context_;
void Setup(const MklReorderWithScaleFwdParams& fwdParams) {
context_.src_mem.reset(
new memory(fwdParams.src_md, cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(fwdParams.dst_md, cpu_engine_, DummyData));
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem.reset(
new memory(fwdParams.scale_md, cpu_engine_, DummyData));
#endif
dnnl::primitive_attr post_ops_attr;
#ifndef ENABLE_ONEDNN_V3
auto const& post_op_params = fwdParams.post_op_params;
DCHECK(post_op_params.name == "scale");
DCHECK_EQ(post_op_params.param.size(), 1);
std::vector<float> scales;
scales.push_back(post_op_params.param[0]);
post_ops_attr.set_output_scales(0, scales);
#else
    post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0);
#endif
context_.reorder_pd.reset(
new ReorderPd(cpu_engine_, context_.src_mem->get_desc(), cpu_engine_,
context_.dst_mem->get_desc(), post_ops_attr));
context_.reorder_prim.reset(new reorder(*context_.reorder_pd));
context_.prim_args.insert({DNNL_ARG_FROM, *context_.src_mem});
context_.prim_args.insert({DNNL_ARG_TO, *context_.dst_mem});
#ifdef ENABLE_ONEDNN_V3
context_.prim_args.insert(
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.scale_mem});
#endif
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T>
class MklReorderWithScalePrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklReorderWithScalePrimitive* Get(
const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
auto reorderPrim = static_cast<MklReorderWithScalePrimitive*>(
MklReorderWithScalePrimitiveFactory<T>::GetInstance().GetReorder(
from, to, fwdParams));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderWithScalePrimitive(fwdParams);
MklReorderWithScalePrimitiveFactory<T>::GetInstance().SetReorder(
from, to, reorderPrim, fwdParams);
}
return reorderPrim;
}
static MklReorderWithScalePrimitiveFactory& GetInstance() {
static MklReorderWithScalePrimitiveFactory instance_;
return instance_;
}
private:
MklReorderWithScalePrimitiveFactory() {}
~MklReorderWithScalePrimitiveFactory() {}
static string CreateKey(const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
FactoryKeyCreator key_creator;
key_creator.AddAsKey(MklReorderPrimitiveFactory<T>::CreateKey(from, to));
if (fwdParams.post_op_params.name == "scale") {
DCHECK_EQ(fwdParams.post_op_params.param.size(), 1);
key_creator.AddAsKey(fwdParams.post_op_params.name);
key_creator.AddAsKey(fwdParams.post_op_params.param[0]);
} else {
return string("not_a_key");
}
return key_creator.GetKey();
}
MklPrimitive* GetReorder(const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
string key = CreateKey(from, to, fwdParams);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op,
const MklReorderWithScaleFwdParams& fwdParams) {
string key = CreateKey(from, to, fwdParams);
this->SetOp(key, op);
}
};
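// QuantizeV2 kernel: converts an input tensor of type S to the quantized type
// T via a scaling oneDNN reorder and also outputs the min/max range that was
// actually used.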
template <typename Device, typename T, typename S, bool native_format = false>
class MklQuantizeV2Op : public OpKernel {
public:
explicit MklQuantizeV2Op(OpKernelConstruction* ctx) : OpKernel(ctx) {
string mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string));
OP_REQUIRES(ctx,
(mode_string == "MIN_COMBINED" || mode_string == "MIN_FIRST" ||
mode_string == "SCALED"),
absl::InvalidArgumentError(
absl::StrCat("Mode string must be 'MIN_COMBINED',"
" 'MIN_FIRST', or 'SCALED', is '" +
mode_string + "'")));
if (mode_string == "MIN_COMBINED") {
mode_ = QUANTIZE_MODE_MIN_COMBINED;
} else if (mode_string == "MIN_FIRST") {
mode_ = QUANTIZE_MODE_MIN_FIRST;
} else if (mode_string == "SCALED") {
mode_ = QUANTIZE_MODE_SCALED;
}
string round_mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string));
OP_REQUIRES(
ctx,
(round_mode_string == "HALF_AWAY_FROM_ZERO" ||
round_mode_string == "HALF_TO_EVEN"),
absl::InvalidArgumentError(absl::StrCat("Round mode string must be "
"'HALF_AWAY_FROM_ZERO' or "
"'HALF_TO_EVEN', is '" +
round_mode_string + "'")));
if (round_mode_string == "HALF_AWAY_FROM_ZERO") {
round_mode_ = ROUND_HALF_AWAY_FROM_ZERO;
} else if (round_mode_string == "HALF_TO_EVEN") {
OP_REQUIRES(ctx, mode_string == "SCALED",
absl::InvalidArgumentError(
absl::StrCat("Round mode 'HALF_TO_EVEN' "
"only supported for mode 'SCALED', "
"but mode is '" +
mode_string + "'.")));
round_mode_ = ROUND_HALF_TO_EVEN;
}
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr("ensure_minimum_range", &ensure_minimum_range_));
}
void ComputeScalar(OpKernelContext* ctx, float min_range, float max_range) {
OP_REQUIRES(ctx, (mode_ == QUANTIZE_MODE_MIN_FIRST),
absl::InvalidArgumentError(
"Scalar calculation in MKL is supported only for"
"MIN_FIRST mode for now."));
const Tensor& min_tensor = ctx->input(1);
const Tensor& max_tensor = ctx->input(2);
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(min_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_input` must be rank 0 but is rank ", min_tensor.dims())));
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(max_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_input` must be rank 0 but is rank ", max_tensor.dims())));
auto cpu_engine = engine(engine::kind::cpu, 0);
const unsigned int src_idx = 0;
const Tensor& src_tensor = MklGetInput(ctx, src_idx);
MklDnnShape output_mkl_shape;
output_mkl_shape.SetMklTensor(false);
Tensor* output_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 0, &output_tensor, src_tensor.shape(),
output_mkl_shape, native_format);
TensorShape min_tf_shape = {};
MklDnnShape min_mkl_shape;
min_mkl_shape.SetMklTensor(false);
Tensor* output_min_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 1, &output_min_tensor, min_tf_shape,
min_mkl_shape, native_format);
TensorShape max_tf_shape = {};
MklDnnShape max_mkl_shape;
max_mkl_shape.SetMklTensor(false);
Tensor* output_max_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 2, &output_max_tensor, max_tf_shape,
max_mkl_shape, native_format);
float scale_factor = 0;
const int number_of_bits = sizeof(T) * 8;
const int64 number_of_steps = static_cast<int64_t>(1) << number_of_bits;
scale_factor = (number_of_steps - 1.0) / (max_range - min_range);
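    // e.g. quantizing to quint8 (8 bits) over the range [-1.0f, 3.0f] gives
    // scale_factor = (256 - 1) / 4.0f = 63.75.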
float* src_data = const_cast<float*>(src_tensor.flat<float>().data());
T* out_data = output_tensor->flat<T>().data();
out_data[0] = (src_data[0] - min_range) * scale_factor;
output_min_tensor->scalar<float>()() = min_range;
output_max_tensor->scalar<float>()() = max_range;
return;
}
void Compute(OpKernelContext* ctx) override {
const unsigned int src_idx = 0;
const float input_min_range = ctx->input(1).scalar<float>()();
const float input_max_range = ctx->input(2).scalar<float>()();
float min_range = std::min(0.0f, input_min_range);
float max_range;
OP_REQUIRES(ctx, (input_max_range >= input_min_range),
absl::InvalidArgumentError(
"input_max_range must be larger than input_min_range."));
const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range),
fabsf(input_max_range))) *
ensure_minimum_range_;
max_range = std::max(input_max_range, min_range + epsilon);
max_range = std::max(0.0f, max_range);
auto cpu_engine = engine(engine::kind::cpu, 0);
const Tensor& src_tensor = MklGetInput(ctx, src_idx);
MklDnnShape src_mkl_shape;
GetMklShape(ctx, src_idx, &src_mkl_shape, native_format);
auto src_tf_shape = src_mkl_shape.IsMklTensor() ? src_mkl_shape.GetTfShape()
: src_tensor.shape();
auto src_dims = src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDims(src_tensor.shape());
auto output_dims = src_dims;
memory::format_tag dst_layout_type;
switch (src_tf_shape.dims()) {
case 0:
ComputeScalar(ctx, min_range, max_range);
return;
case 1:
dst_layout_type = memory::format_tag::x;
break;
case 2:
dst_layout_type = memory::format_tag::nc;
break;
case 3:
dst_layout_type = memory::format_tag::tnc;
break;
case 4:
dst_layout_type = memory::format_tag::nhwc;
break;
case 5:
dst_layout_type = memory::format_tag::ndhwc;
break;
default:
OP_REQUIRES_OK(ctx,
absl::AbortedError("Input dims must be <= 5 and >= 1"));
return;
}
MklDnnData<S> src(&cpu_engine);
MklDnnData<T> dst(&cpu_engine);
#ifdef ENABLE_ONEDNN_V3
MklDnnData<float> scale(&cpu_engine);
#endif
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<S>(), dst_layout_type);
src.SetUsrMem(src_md, &src_tensor);
memory::desc dst_md =
memory::desc(src_dims, MklDnnType<T>(), dst_layout_type);
MklDnnShape output_mkl_shape;
TensorShape output_tf_shape;
if (src_mkl_shape.IsMklTensor()) {
output_mkl_shape.SetMklTensor(true);
output_mkl_shape.SET_MKL_LAYOUT(dst_md);
output_mkl_shape.SetElemType(MklDnnType<T>());
output_mkl_shape.SetTfLayout(src_mkl_shape.GetDimension(),
src_mkl_shape.GetSizesAsMklDnnDims(),
src_mkl_shape.GetTfDataFormat());
output_tf_shape.AddDim(dst_md.get_size() / sizeof(T));
} else {
output_mkl_shape.SetMklTensor(false);
output_tf_shape = MklDnnDimsToTFShape(output_dims);
}
Tensor* output_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 0, &output_tensor, output_tf_shape,
output_mkl_shape, native_format);
dst.SetUsrMem(dst_md, output_tensor);
TensorShape min_tf_shape = {};
MklDnnShape min_mkl_shape;
min_mkl_shape.SetMklTensor(false);
Tensor* output_min_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 1, &output_min_tensor, min_tf_shape,
min_mkl_shape, native_format);
TensorShape max_tf_shape = {};
MklDnnShape max_mkl_shape;
max_mkl_shape.SetMklTensor(false);
Tensor* output_max_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 2, &output_max_tensor, max_tf_shape,
max_mkl_shape, native_format);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(ctx);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
float scale_factor = 0;
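    // SCALED mode: make the range symmetric around zero for signed types (or
    // [0, max_abs] for unsigned types) and apply a single scale factor through
    // a oneDNN reorder primitive with a "scale" post-op.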
if (mode_ == QUANTIZE_MODE_SCALED) {
const int num_bits = sizeof(T) * 8;
const float max_abs = std::max(std::abs(min_range), std::abs(max_range));
const bool is_signed = std::is_same<T, qint8>() ||
std::is_same<T, qint16>() ||
std::is_same<T, qint32>();
float target_range;
if (is_signed) {
max_range = max_abs;
min_range = -max_abs;
target_range = static_cast<float>((uint64_t{1} << num_bits) - 1) / 2.;
} else {
max_range = max_abs;
min_range = 0.0;
target_range = static_cast<float>((uint64_t{1} << num_bits) - 1);
}
scale_factor = target_range / max_abs;
#ifdef ENABLE_ONEDNN_V3
auto scale_md =
memory::desc({1}, MklDnnType<float>(), memory::format_tag::x);
MklReorderWithScaleFwdParams fwdParams(src_dims, src_md, dst_md,
scale_md);
Tensor scale_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {1}, &scale_tensor));
scale_tensor.flat<float>()(0) = scale_factor;
scale.SetUsrMem(scale_md, &scale_tensor);
#else
MklReorderWithScaleFwdParams fwdParams(src_dims, src_md, dst_md);
#endif
fwdParams.dtypes.append(typeid(S).name());
fwdParams.dtypes.append(typeid(T).name());
fwdParams.post_op_params.name = "scale";
fwdParams.post_op_params.param.push_back(scale_factor);
MklReorderWithScalePrimitive* reorder_prim =
MklReorderWithScalePrimitiveFactory<T>::Get(
src.GetUsrMem(), dst.GetUsrMem(), fwdParams);
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, reorder_prim->GetEngine()));
reorder_prim->Execute(src.GetUsrMemDataHandle(),
dst.GetUsrMemDataHandle(),
#ifdef ENABLE_ONEDNN_V3
scale.GetUsrMemDataHandle(),
#endif
cpu_stream);
} else if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
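      // MIN_FIRST mode: a oneDNN binary-add of the input and a broadcast shift
      // of (-min_range), with both sources scaled by 255 / (max_range -
      // min_range) so the result spans the 8-bit quantized range.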
using namespace dnnl;
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, cpu_engine));
auto shift = static_cast<S>(-min_range);
memory::dims shift_dims(src_tf_shape.dims(), 1);
auto shift_md =
memory::desc(shift_dims, MklDnnType<S>(), dst_layout_type);
memory shift_mem(shift_md, cpu_engine, (void*)(&shift));
primitive_attr attr;
std::vector<float> src_0_scale{255.0f / (max_range - min_range)};
std::vector<float> src_1_scale{255.0f / (max_range - min_range)};
#ifdef ENABLE_ONEDNN_V3
attr.set_scales_mask(DNNL_ARG_SRC_0, 0);
attr.set_scales_mask(DNNL_ARG_SRC_1, 0);
auto binary_pd = binary::primitive_desc(cpu_engine, algorithm::binary_add,
src_md, shift_md, dst_md, attr);
#else
attr.set_scales(DNNL_ARG_SRC_0, 0, src_0_scale);
attr.set_scales(DNNL_ARG_SRC_1, 0, src_1_scale);
auto binary_d =
binary::desc(algorithm::binary_add, src_md, shift_md, dst_md);
auto binary_pd = binary::primitive_desc(binary_d, attr, cpu_engine);
#endif
auto binary_prim = binary(binary_pd);
auto src_0_scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x}, cpu_engine,
src_0_scale.data());
auto src_1_scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x}, cpu_engine,
src_1_scale.data());
std::unordered_map<int, memory> net_args{
{DNNL_ARG_SRC_0, *src.GetUsrMem()},
{DNNL_ARG_SRC_1, shift_mem},
{DNNL_ARG_DST, *dst.GetUsrMem()},
#ifdef ENABLE_ONEDNN_V3
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC_0, src_0_scale_mem},
{ DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC_1,
src_1_scale_mem }
#endif
};
binary_prim.execute(*cpu_stream, net_args);
} else {
OP_REQUIRES(ctx, false,
absl::UnimplementedError(
"Supported modes are MIN_FIRST and SCALED only."));
}
output_min_tensor->scalar<float>()() = min_range;
output_max_tensor->scalar<float>()() = max_range;
}
private:
float ensure_minimum_range_;
int mode_;
int round_mode_;
int axis_;
bool narrow_range_;
};
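// Registers the oneDNN quantize kernel for float inputs producing qint8 and
// quint8 outputs under the MKL quantized-op label.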
#define REGISTER_QUANTIZE(src_type, dst_type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklQuantizeV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<dst_type>("T") \
.Label(mkl_op_registry::kMklQuantizedOpLabel), \
MklQuantizeV2Op<CPUDevice, dst_type, src_type, true>)
REGISTER_QUANTIZE(float, qint8);
REGISTER_QUANTIZE(float, quint8);
#undef SET_MKL_LAYOUT
}
#endif | #if defined(INTEL_MKL)
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class MklQuantizeV2OpTest : public OpsTestBase {};
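// SCALED mode with range [0, 255] yields a unit scale factor, so values are
// rounded to the nearest integer and saturated at 255 (e.g. 500.0 -> 255).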
TEST_F(MklQuantizeV2OpTest, small_uint8) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "SCALED")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{0.0, 1.0, 1.25, 1.75, 127.0, 255.0, 500.0, 2.0});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
Tensor expected_min(allocator(), DT_FLOAT, TensorShape({}));
Tensor expected_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255, 2});
test::FillValues<float>(&expected_min, {0.0});
test::FillValues<float>(&expected_max, {255.0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
test::ExpectTensorEqual<float>(expected_min, *GetOutput(1));
test::ExpectTensorEqual<float>(expected_max, *GetOutput(2));
}
TEST_F(MklQuantizeV2OpTest, small_int8) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}), {0.0, -1.0, 1.25, -1.75, -24.5,
-255.0, -80.315, 256.0});
AddInputFromArray<float>(TensorShape({}), {-50.0});
AddInputFromArray<float>(TensorShape({}), {127.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({8}));
Tensor expected_min(allocator(), DT_FLOAT, TensorShape({}));
Tensor expected_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<qint8>(&expected, {0, -1, 1, -2, -25, -128, -81, 127});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
test::FillValues<float>(&expected_min, {-127.0});
test::FillValues<float>(&expected_max, {127.0});
test::ExpectTensorEqual<float>(expected_min, *GetOutput(1));
test::ExpectTensorEqual<float>(expected_max, *GetOutput(2));
}
TEST_F(MklQuantizeV2OpTest, small_minfirst) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{1.0, 1.25, 1.75, 2, 3.15, 127.0, 255.0, 500.0});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {1, 1, 2, 2, 3, 127, 255, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(255.0f, output_max, 1e-5f);
}
TEST_F(MklQuantizeV2OpTest, small_minfirst_uint) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.1});
AddInputFromArray<float>(TensorShape({}), {0.8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {32, 64, 96, 128, 159, 191, 223, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(0.8f, output_max, 1e-5f);
}
TEST_F(MklQuantizeV2OpTest, small_minfirst_int) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8});
AddInputFromArray<float>(TensorShape({}), {-0.8});
AddInputFromArray<float>(TensorShape({}), {-0.1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {223, 191, 159, 128, 96, 64, 32, 0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(-0.8f, output_min, 1e-5f);
EXPECT_NEAR(0.0f, output_max, 1e-5f);
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
77a4602c-00cd-464d-b22d-1550d5fe04ef | cpp | tensorflow/tensorflow | kernels | tensorflow/c/kernels.cc | tensorflow/c/kernels_test.cc | #include "tensorflow/c/kernels.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "xla/tsl/c/tsl_status_internal.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/platform.h"
#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "xla/stream_executor/stream.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_internal.h"
#include "tensorflow/core/framework/device.h"
#include "tsl/platform/statusor.h"
#endif
typedef std::function<void()> AsyncOpKernelDoneCallback;
void TF_RunAsyncOpKernelDoneCallback(TF_AsyncOpKernelDoneCallback* done) {
(*reinterpret_cast<AsyncOpKernelDoneCallback*>(done))();
}
struct TF_KernelBuilder {
::tensorflow::KernelDefBuilder* cc_builder;
void* (*create_function)(TF_OpKernelConstruction*);
void (*compute_function)(void*, TF_OpKernelContext*);
void (*compute_async_function)(void*, TF_OpKernelContext*,
TF_AsyncOpKernelDoneCallback* done);
void (*delete_function)(void*);
};
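// Allocates a TF_KernelBuilder that records the op name, the device type, and
// the C callbacks used to create, compute, and destroy the plugin's kernel
// state.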
TF_KernelBuilder* TF_NewKernelBuilder(
const char* op_name, const char* device_name,
void* (*create_func)(TF_OpKernelConstruction*),
void (*compute_func)(void*, TF_OpKernelContext*),
void (*delete_func)(void*)) {
TF_KernelBuilder* result = new TF_KernelBuilder;
result->cc_builder = new ::tensorflow::KernelDefBuilder(op_name);
result->cc_builder->Device(device_name);
result->create_function = create_func;
result->compute_function = compute_func;
result->compute_async_function = nullptr;
result->delete_function = delete_func;
return result;
}
TF_KernelBuilder* TF_NewAsyncKernelBuilder(
const char* op_name, const char* device_name,
void* (*create_func)(TF_OpKernelConstruction*),
void (*compute_async_func)(void*, TF_OpKernelContext*,
TF_AsyncOpKernelDoneCallback* done),
void (*delete_func)(void*)) {
TF_KernelBuilder* result = new TF_KernelBuilder;
result->cc_builder = new ::tensorflow::KernelDefBuilder(op_name);
result->cc_builder->Device(device_name);
result->create_function = create_func;
result->compute_function = nullptr;
result->compute_async_function = compute_async_func;
result->delete_function = delete_func;
return result;
}
void TF_DeleteKernelBuilder(TF_KernelBuilder* builder) {
if (builder != nullptr) {
delete builder->cc_builder;
delete builder;
}
}
namespace tensorflow {
namespace {
#define CASE(type) \
case DataTypeToEnum<type>::value: { \
kernel_builder->cc_builder->TypeConstraint<type>(attr_name); \
break; \
}
void AddTypeConstraint(TF_KernelBuilder* kernel_builder, const char* attr_name,
const DataType dtype, TF_Status* status) {
switch (dtype) {
TF_CALL_ALL_TYPES(CASE);
TF_CALL_QUANTIZED_TYPES(CASE);
TF_CALL_quint16(CASE);
TF_CALL_qint16(CASE);
default:
status->status =
absl::UnimplementedError(absl::StrCat("Unexpected type ", dtype));
return;
}
TF_SetStatus(status, TF_OK, "");
}
#undef CASE
}
}
namespace {
const tensorflow::AttrValue* GetAttrValue(TF_OpKernelConstruction* ctx,
const char* attr_name,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
const tensorflow::AttrValue* attr =
::tensorflow::AttrSlice(cc_ctx->def()).Find(attr_name);
if (attr == nullptr) {
status->status = absl::InvalidArgumentError(
absl::StrCat("Operation '", cc_ctx->def().name(),
"' has no attr named '", attr_name, "'."));
}
return attr;
}
}
void TF_KernelBuilder_TypeConstraint(TF_KernelBuilder* kernel_builder,
const char* attr_name,
const TF_DataType type,
TF_Status* status) {
tensorflow::DataType dtype = static_cast<tensorflow::DataType>(type);
tensorflow::AddTypeConstraint(kernel_builder, attr_name, dtype, status);
}
void TF_KernelBuilder_HostMemory(TF_KernelBuilder* kernel_builder,
const char* arg_name) {
kernel_builder->cc_builder->HostMemory(arg_name);
}
void TF_KernelBuilder_Priority(TF_KernelBuilder* kernel_builder,
int32_t priority_number) {
kernel_builder->cc_builder->Priority(priority_number);
}
void TF_KernelBuilder_Label(TF_KernelBuilder* kernel_builder,
const char* label) {
kernel_builder->cc_builder->Label(label);
}
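// Illustrative sketch (not part of the original file) of how a plugin might
// use this builder API; "MyOp", MyCreate, MyCompute and MyDelete are
// placeholder names:
//
//   TF_KernelBuilder* b = TF_NewKernelBuilder("MyOp", "CPU", &MyCreate,
//                                             &MyCompute, &MyDelete);
//   TF_KernelBuilder_TypeConstraint(b, "T", TF_FLOAT, status);
//   TF_RegisterKernelBuilder("MyOp", b, status);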
namespace tensorflow {
namespace {
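// Adapter that exposes the C create/compute/delete callbacks as a regular
// tensorflow::OpKernel. The opaque pointer returned by the create callback is
// handed back to every compute call and released by the delete callback.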
class COpKernel : public OpKernel {
public:
explicit COpKernel(OpKernelConstruction* ctx,
void* (*create_func)(TF_OpKernelConstruction*),
void (*compute_func)(void*, TF_OpKernelContext*),
void (*delete_func)(void*))
: OpKernel(ctx), compute_func_(compute_func), delete_func_(delete_func) {
if (create_func != nullptr) {
c_kernel_ =
(*create_func)(reinterpret_cast<TF_OpKernelConstruction*>(ctx));
} else {
c_kernel_ = nullptr;
}
}
void Compute(OpKernelContext* ctx) override {
(*compute_func_)(c_kernel_, reinterpret_cast<TF_OpKernelContext*>(ctx));
}
~COpKernel() override {
if (delete_func_ != nullptr) {
(*delete_func_)(c_kernel_);
}
}
private:
void (*compute_func_)(void*, TF_OpKernelContext* context);
void (*delete_func_)(void*);
void* c_kernel_;
};
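// Asynchronous counterpart of COpKernel. ComputeAsync forwards to the C async
// compute callback; the synchronous Compute override blocks on a Notification
// until the done callback fires.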
class CAsyncOpKernel : public AsyncOpKernel {
public:
explicit CAsyncOpKernel(
OpKernelConstruction* ctx, void* (*create_func)(TF_OpKernelConstruction*),
void (*compute_async_func)(void*, TF_OpKernelContext*,
TF_AsyncOpKernelDoneCallback*),
void (*delete_func)(void*))
: AsyncOpKernel(ctx),
compute_async_func_(compute_async_func),
delete_func_(delete_func) {
if (create_func != nullptr) {
c_kernel_ =
(*create_func)(reinterpret_cast<TF_OpKernelConstruction*>(ctx));
} else {
c_kernel_ = nullptr;
}
}
void Compute(OpKernelContext* ctx) override {
Notification n;
ComputeAsync(ctx, [&n]() { n.Notify(); });
n.WaitForNotification();
}
void ComputeAsync(OpKernelContext* ctx,
AsyncOpKernelDoneCallback done) override {
(*compute_async_func_)(
c_kernel_, reinterpret_cast<TF_OpKernelContext*>(ctx),
reinterpret_cast<TF_AsyncOpKernelDoneCallback*>(&done));
}
CAsyncOpKernel* AsAsync() override { return this; }
~CAsyncOpKernel() override {
if (delete_func_ != nullptr) {
(*delete_func_)(c_kernel_);
}
}
private:
void (*compute_async_func_)(void*, TF_OpKernelContext* context,
TF_AsyncOpKernelDoneCallback* done);
void (*delete_func_)(void*);
void* c_kernel_;
};
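// Factory handed to the kernel registry: it instantiates a COpKernel when a
// synchronous compute function was supplied and a CAsyncOpKernel otherwise,
// and deletes the TF_KernelBuilder when the factory itself is destroyed.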
class KernelBuilderFactory
: public ::tensorflow::kernel_factory::OpKernelFactory {
public:
explicit KernelBuilderFactory(TF_KernelBuilder* builder)
: builder_(builder) {}
::tensorflow::OpKernel* Create(
::tensorflow::OpKernelConstruction* context) override {
if (builder_->compute_function)
return new ::tensorflow::COpKernel(context, builder_->create_function,
builder_->compute_function,
builder_->delete_function);
else
return new ::tensorflow::CAsyncOpKernel(
context, builder_->create_function, builder_->compute_async_function,
builder_->delete_function);
}
~KernelBuilderFactory() override { TF_DeleteKernelBuilder(builder_); }
private:
TF_KernelBuilder* builder_;
};
}
}
void TF_RegisterKernelBuilder(const char* name, TF_KernelBuilder* builder,
TF_Status* status) {
using tensorflow::register_kernel::Name;
TF_RegisterKernelBuilderWithKernelDef(
nullptr, name, builder, status);
}
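// Registers the kernel either from the builder's own KernelDef (when
// serialized_kernel_def is null) or from a caller-supplied serialized
// KernelDef that is parsed here.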
void TF_RegisterKernelBuilderWithKernelDef(const char* serialized_kernel_def,
const char* name,
TF_KernelBuilder* builder,
TF_Status* status) {
using tensorflow::register_kernel::Name;
if (serialized_kernel_def == nullptr) {
tensorflow::kernel_factory::OpKernelRegistrar(
builder->cc_builder->Build(), name,
std::make_unique<tensorflow::KernelBuilderFactory>(builder));
TF_SetStatus(status, TF_OK, "");
return;
}
tensorflow::KernelDef* kernel_def = new tensorflow::KernelDef();
bool success = kernel_def->ParsePartialFromString(serialized_kernel_def);
if (!success) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"Error parsing serialized KernelDef.");
return;
}
tensorflow::kernel_factory::OpKernelRegistrar(
kernel_def, name,
std::make_unique<tensorflow::KernelBuilderFactory>(builder));
TF_SetStatus(status, TF_OK, "");
}
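// Returns the SP_Stream backing the kernel's device. Only pluggable devices
// expose such a stream, so mobile builds, CPU devices, and non-pluggable
// devices report an error instead.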
SP_Stream TF_GetStream(TF_OpKernelContext* ctx, TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
status->status = absl::UnimplementedError(
"Accessing device stream is not supported on mobile. File a bug at "
"https:
"important to you");
return nullptr;
#else
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (cc_ctx->op_device_context() == nullptr) {
status->status = absl::FailedPreconditionError(
"Accessing device stream is not supported for a CPU device.");
return nullptr;
} else if (!cc_ctx->op_device_context()->IsPluggableDevice()) {
status->status = absl::FailedPreconditionError(
"Accessing device stream is only supported for pluggable devices.");
return nullptr;
} else {
TF_SetStatus(status, TF_OK, "");
auto c_stream = static_cast<stream_executor::CStream*>(
cc_ctx->op_device_context()->stream());
return c_stream->Handle();
}
#endif
}
int TF_NumInputs(TF_OpKernelContext* ctx) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
return cc_ctx->num_inputs();
}
int TF_NumOutputs(TF_OpKernelContext* ctx) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
return cc_ctx->num_outputs();
}
void TF_GetInput(TF_OpKernelContext* ctx, int i, TF_Tensor** tensor,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (i < 0 || i >= cc_ctx->num_inputs()) {
TF_SetStatus(status, TF_OUT_OF_RANGE, "input index out of range");
return;
}
const ::tensorflow::Tensor& cc_tensor(cc_ctx->input(i));
if ((&cc_tensor) == nullptr) {
*tensor = nullptr;
return;
}
TF_Tensor* result =
::tensorflow::TF_TensorFromTensor(cc_tensor, &status->status);
if (TF_GetCode(status) == TF_OK) {
*tensor = result;
}
}
void TF_InputRange(TF_OpKernelContext* ctx, const char* name,
TF_InputRange_Args* args) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
int start = -1, stop = -1;
auto status = cc_ctx->op_kernel().InputRange(name, &start, &stop);
args->start = start;
args->stop = stop;
tensorflow::Set_TF_Status_from_Status(args->status, status);
}
TF_DataType TF_InputDatatype(TF_OpKernelContext* ctx, int index) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
CHECK_GE(index, 0);
CHECK_LT(index, cc_ctx->num_inputs());
return static_cast<TF_DataType>(cc_ctx->input_dtype(index));
}
void TF_SetOutput(TF_OpKernelContext* ctx, int i, const TF_Tensor* tensor,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (i < 0 || i >= cc_ctx->num_outputs()) {
TF_SetStatus(status, TF_OUT_OF_RANGE, "output index out of range");
return;
}
::tensorflow::Tensor cc_tensor;
absl::Status s = ::tensorflow::TF_TensorToTensor(tensor, &cc_tensor);
::tensorflow::Set_TF_Status_from_Status(status, s);
if (s.ok()) {
cc_ctx->set_output(i, cc_tensor);
}
}
TF_Tensor* TF_GetMutableOutput(TF_OpKernelContext* ctx, int i,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (i < 0 || i >= cc_ctx->num_outputs()) {
TF_SetStatus(status, TF_OUT_OF_RANGE, "output index out of range");
return nullptr;
}
const ::tensorflow::Tensor& cc_tensor = *(cc_ctx->mutable_output(i));
TF_Tensor* result =
::tensorflow::TF_TensorFromTensor(cc_tensor, &status->status);
if (TF_GetCode(status) == TF_OK) {
return result;
} else {
return nullptr;
}
}
void TF_GetSerializedFunctionDefLibrary(
TF_OpKernelContext* ctx, TF_Buffer* serialized_function_def_library,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
auto fdef_lib =
cc_ctx->function_library()->GetFunctionLibraryDefinition()->ToProto();
auto cc_status =
tensorflow::MessageToBuffer(fdef_lib, serialized_function_def_library);
tensorflow::Set_TF_Status_from_Status(status, cc_status);
}
void TF_GetSerializedConfigProto(TF_OpKernelContext* ctx,
TF_Buffer* serialized_config_proto,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
const tensorflow::ConfigProto* config_proto_ptr =
cc_ctx->function_library()->config_proto();
tensorflow::ConfigProto config_proto;
if (config_proto_ptr != nullptr) {
config_proto = *config_proto_ptr;
}
auto cc_status =
tensorflow::MessageToBuffer(config_proto, serialized_config_proto);
tensorflow::Set_TF_Status_from_Status(status, cc_status);
}
void TF_GetSerializedResourceHandleProto(
TF_OpKernelContext* ctx, int i, TF_Buffer* serialized_resource_handle_proto,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
const tensorflow::ResourceHandle& handle = HandleFromInput(cc_ctx, i);
tensorflow::ResourceHandleProto handle_proto;
handle.AsProto(&handle_proto);
auto cc_status = tensorflow::MessageToBuffer(
handle_proto, serialized_resource_handle_proto);
tensorflow::Set_TF_Status_from_Status(status, cc_status);
}
void TF_OpKernelConstruction_Failure(TF_OpKernelConstruction* ctx,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
absl::Status s(tsl::StatusFromTF_Status(status));
cc_ctx->CtxFailure(s);
}
void TF_OpKernelContext_Failure(TF_OpKernelContext* ctx, TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
absl::Status s(tsl::StatusFromTF_Status(status));
cc_ctx->CtxFailure(s);
}
void TF_OpKernelConstruction_GetAttrSize(TF_OpKernelConstruction* ctx,
const char* attr_name,
int32_t* list_size,
int32_t* total_size,
TF_Status* status) {
const tensorflow::AttrValue* attr = GetAttrValue(ctx, attr_name, status);
if (!status->status.ok()) {
*list_size = -1;
*total_size = -1;
return;
}
switch (attr->value_case()) {
#define SINGLE_CASE(kK, attr_type, size_expr) \
case tensorflow::AttrValue::kK: \
*list_size = -1; \
*total_size = size_expr; \
break;
SINGLE_CASE(kS, TF_ATTR_STRING, attr->s().length());
SINGLE_CASE(kI, TF_ATTR_INT, -1);
SINGLE_CASE(kF, TF_ATTR_FLOAT, -1);
SINGLE_CASE(kB, TF_ATTR_BOOL, -1);
SINGLE_CASE(kType, TF_ATTR_TYPE, -1);
SINGLE_CASE(kShape, TF_ATTR_SHAPE,
attr->shape().unknown_rank() ? -1 : attr->shape().dim_size());
SINGLE_CASE(kTensor, TF_ATTR_TENSOR, -1);
#undef SINGLE_CASE
case tensorflow::AttrValue::kList:
*list_size = 0;
*total_size = -1;
#define LIST_CASE(field, attr_type, ...) \
if (attr->list().field##_size() > 0) { \
*list_size = attr->list().field##_size(); \
__VA_ARGS__; \
break; \
}
LIST_CASE(
s, TF_ATTR_STRING, *total_size = 0;
for (int i = 0; i < attr->list().s_size();
++i) { *total_size += attr->list().s(i).size(); });
LIST_CASE(i, TF_ATTR_INT);
LIST_CASE(f, TF_ATTR_FLOAT);
LIST_CASE(b, TF_ATTR_BOOL);
LIST_CASE(type, TF_ATTR_TYPE);
LIST_CASE(
shape, TF_ATTR_SHAPE, *total_size = 0;
for (int i = 0; i < attr->list().shape_size(); ++i) {
const auto& s = attr->list().shape(i);
*total_size += s.unknown_rank() ? 0 : s.dim_size();
});
LIST_CASE(tensor, TF_ATTR_TENSOR);
LIST_CASE(tensor, TF_ATTR_FUNC);
#undef LIST_CASE
break;
case tensorflow::AttrValue::kPlaceholder:
*list_size = -1;
*total_size = -1;
break;
case tensorflow::AttrValue::kFunc:
*list_size = -1;
*total_size = -1;
break;
case tensorflow::AttrValue::VALUE_NOT_SET:
status->status = absl::InvalidArgumentError(
absl::StrCat("Attribute '", attr_name, "' has no value set"));
break;
}
}
#define DEFINE_TF_GETATTR(func, c_type, cc_type, attr_type, list_field) \
void TF_OpKernelConstruction_GetAttr##func(TF_OpKernelConstruction* ctx, \
const char* attr_name, \
c_type* val, TF_Status* status) { \
TF_SetStatus(status, TF_OK, ""); \
cc_type v; \
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx); \
absl::Status s = cc_ctx->GetAttr(attr_name, &v); \
::tensorflow::Set_TF_Status_from_Status(status, s); \
if (s.ok()) { \
*val = static_cast<c_type>(v); \
} \
} \
void TF_OpKernelConstruction_GetAttr##func##List( \
TF_OpKernelConstruction* ctx, const char* attr_name, c_type* vals, \
int max_vals, TF_Status* status) { \
TF_SetStatus(status, TF_OK, ""); \
const tensorflow::AttrValue* attr = GetAttrValue(ctx, attr_name, status); \
if (!status->status.ok()) return; \
if (attr->value_case() != tensorflow::AttrValue::kList) { \
status->status = absl::InvalidArgumentError( \
absl::StrCat("Attribute '", attr_name, "' is not a list.")); \
return; \
} \
status->status = \
tensorflow::AttrValueHasType(*attr, "list(" attr_type ")"); \
if (!status->status.ok()) return; \
const auto len = std::min(max_vals, attr->list().list_field##_size()); \
for (int i = 0; i < len; ++i) { \
vals[i] = static_cast<c_type>(attr->list().list_field(i)); \
} \
}
DEFINE_TF_GETATTR(Type, TF_DataType, tensorflow::DataType, "type", type)
DEFINE_TF_GETATTR(Int32, int32_t, int32_t, "int", i)
DEFINE_TF_GETATTR(Int64, int64_t, int64_t, "int", i)
DEFINE_TF_GETATTR(Float, float, float, "float", f)
DEFINE_TF_GETATTR(Bool, TF_Bool, bool, "bool", b)
void TF_OpKernelConstruction_GetAttrString(TF_OpKernelConstruction* ctx,
const char* attr_name, char* value,
size_t max_length,
TF_Status* status) {
std::string v;
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
absl::Status s = cc_ctx->GetAttr(attr_name, &v);
::tensorflow::Set_TF_Status_from_Status(status, s);
if (!status->status.ok()) return;
if (max_length <= 0) {
return;
}
std::memcpy(value, v.data(), std::min<size_t>(v.length(), max_length));
}
void TF_OpKernelConstruction_GetAttrStringList(TF_OpKernelConstruction* ctx,
const char* attr_name,
char** values, size_t* lengths,
int max_values, void* storage,
size_t storage_size,
TF_Status* status) {
std::vector<std::string> v;
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
absl::Status s = cc_ctx->GetAttr(attr_name, &v);
::tensorflow::Set_TF_Status_from_Status(status, s);
if (!status->status.ok()) return;
const auto len = std::min(max_values, static_cast<int>(v.size()));
char* p = static_cast<char*>(storage);
for (int i = 0; i < len; ++i) {
const std::string& s = v[i];
values[i] = p;
lengths[i] = s.size();
if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) {
status->status = absl::InvalidArgumentError(
"Not enough storage to hold the requested list of strings");
return;
}
std::memcpy(values[i], s.data(), s.size());
p += s.size();
}
}
void TF_OpKernelConstruction_GetAttrTensor(TF_OpKernelConstruction* ctx,
const char* attr_name,
TF_Tensor** val, TF_Status* status) {
*val = nullptr;
::tensorflow::Tensor t;
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
absl::Status s = cc_ctx->GetAttr(attr_name, &t);
::tensorflow::Set_TF_Status_from_Status(status, s);
if (!status->status.ok()) return;
*val = TF_TensorFromTensor(t, &status->status);
}
void TF_OpKernelConstruction_GetAttrTensorList(TF_OpKernelConstruction* ctx,
const char* attr_name,
TF_Tensor** vals, int max_values,
TF_Status* status) {
std::vector<::tensorflow::Tensor> v;
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
absl::Status s = cc_ctx->GetAttr(attr_name, &v);
::tensorflow::Set_TF_Status_from_Status(status, s);
if (!status->status.ok()) return;
const auto len = std::min(max_values, static_cast<int>(v.size()));
for (int i = 0; i < len; ++i) {
vals[i] = TF_TensorFromTensor(v[i], &status->status);
if (!status->status.ok()) return;
}
}
TF_Buffer* TF_OpKernelConstruction_GetAttrFunction(TF_OpKernelConstruction* ctx,
const char* attr_name,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
tensorflow::NameAttrList function;
auto cc_status = cc_ctx->GetAttr(attr_name, &function);
if (!cc_status.ok()) {
tsl::Set_TF_Status_from_Status(status, cc_status);
return nullptr;
}
TF_Buffer* buffer = TF_NewBuffer();
cc_status = tensorflow::MessageToBuffer(function, buffer);
tsl::Set_TF_Status_from_Status(status, cc_status);
if (!cc_status.ok())
return nullptr;
else
return buffer;
}
bool TF_OpKernelConstruction_HasAttr(TF_OpKernelConstruction* ctx,
const char* attr_name, TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
return cc_ctx->HasAttr(attr_name);
}
TF_StringView TF_OpKernelConstruction_GetName(TF_OpKernelConstruction* ctx) {
auto* cc_ctx = reinterpret_cast<tensorflow::OpKernelConstruction*>(ctx);
TF_StringView string_view_of_name;
string_view_of_name.data = cc_ctx->def().name().data();
string_view_of_name.len = cc_ctx->def().name().length();
return string_view_of_name;
}
TF_DataType TF_ExpectedOutputDataType(TF_OpKernelContext* ctx, int i) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
CHECK_GE(i, 0);
CHECK_LT(i, cc_ctx->num_outputs());
return static_cast<TF_DataType>(cc_ctx->expected_output_dtype(i));
}
bool TF_IsHostMemoryInput(TF_OpKernelContext* ctx, int i, TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (i < 0 || i >= cc_ctx->num_inputs()) {
TF_SetStatus(status, TF_OUT_OF_RANGE, "input index out of range");
return false;
}
TF_SetStatus(status, TF_OK, "");
return cc_ctx->input_memory_type(i) == tensorflow::HOST_MEMORY;
}
bool TF_IsHostMemoryOutput(TF_OpKernelContext* ctx, int i, TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
if (i < 0 || i >= cc_ctx->num_outputs()) {
TF_SetStatus(status, TF_OUT_OF_RANGE, "output index out of range");
return false;
}
TF_SetStatus(status, TF_OK, "");
return cc_ctx->output_memory_type(i) == tensorflow::HOST_MEMORY;
}
int64_t TF_StepId(TF_OpKernelContext* ctx) {
return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)->step_id();
}
TF_Buffer* TF_OpKernelConstruction_GetNodeDef(TF_OpKernelConstruction* ctx,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelConstruction*>(ctx);
TF_Buffer* ret = TF_NewBuffer();
status->status = MessageToBuffer(cc_ctx->def(), ret);
if (!status->status.ok()) {
TF_DeleteBuffer(ret);
return nullptr;
}
return ret;
}
uint64_t TF_GetFrameId(TF_OpKernelContext* ctx) {
return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)
->frame_iter()
.frame_id;
}
int TF_GetGraphDefVersion(TF_OpKernelContext* ctx) {
return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)
->function_library()
->graph_def_version();
}
int64_t TF_GetIterId(TF_OpKernelContext* ctx) {
return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)
->frame_iter()
.iter_id;
}
int64_t TF_GetStepId(TF_OpKernelContext* ctx) {
return reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)->step_id();
}
int TF_GetDeviceId(TF_OpKernelContext* ctx) {
const tensorflow::DeviceBase* device_base =
reinterpret_cast<tensorflow::OpKernelContext*>(ctx)->device();
#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
if (!device_base->parsed_name().has_id) return -1;
return device_base->parsed_name().id;
#else
const auto* device = reinterpret_cast<const tensorflow::Device*>(
device_base->UnderlyingDevice());
return tsl::GetDeviceIdFromDeviceParsedName(device->parsed_name());
#endif
}
TF_StringView TF_GetDeviceName(TF_OpKernelContext* ctx) {
const auto& device_name =
reinterpret_cast<tensorflow::OpKernelContext*>(ctx)->device()->name();
TF_StringView device_name_sv;
device_name_sv.data = device_name.data();
device_name_sv.len = device_name.length();
return device_name_sv;
}
#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
TF_RendezvousThunk TF_GetRendezvous(TF_OpKernelContext* ctx) {
TF_RendezvousThunk* thunk =
ToC(reinterpret_cast<tensorflow::OpKernelContext*>(ctx)->rendezvous());
TF_RendezvousThunk res = *thunk;
delete thunk;
return res;
}
#endif
TF_StringView TF_GetOpKernelName(TF_OpKernelContext* ctx) {
auto cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
TF_StringView opkernel_name_sv;
opkernel_name_sv.data = cc_ctx->op_kernel().name().data();
opkernel_name_sv.len = cc_ctx->op_kernel().name().length();
return opkernel_name_sv;
}
TF_StringView TF_GetResourceMgrDefaultContainerName(TF_OpKernelContext* ctx) {
auto cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
TF_StringView default_container_name_sv;
default_container_name_sv.data =
cc_ctx->resource_manager()->default_container().data();
default_container_name_sv.len =
cc_ctx->resource_manager()->default_container().length();
return default_container_name_sv;
}
TF_StringView TF_GetOpKernelRequestedInput(TF_OpKernelContext* ctx,
size_t index) {
auto cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
TF_StringView requested_input_sv;
requested_input_sv.data = cc_ctx->op_kernel().requested_input(index).data();
requested_input_sv.len = cc_ctx->op_kernel().requested_input(index).length();
return requested_input_sv;
}
TF_Tensor* TF_AllocateOutput(TF_OpKernelContext* context, int index,
TF_DataType dtype, const int64_t* dims,
int num_dims, size_t len, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
tensorflow::gtl::ArraySlice<const int64_t> dimarray(
reinterpret_cast<const int64_t*>(dims), num_dims);
tensorflow::Tensor* tensor;
absl::Status s = cc_ctx->allocate_output(
index, tensorflow::TensorShape(dimarray), &tensor);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
TF_Tensor* tf_tensor = TF_TensorFromTensor(*tensor, &s);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
return tf_tensor;
}
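// Tries to reuse the buffer of one of the candidate inputs for the given
// output, reporting the chosen index through forwarded_input (or -1 when no
// input could be forwarded), and otherwise allocates a fresh output tensor.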
TF_Tensor* TF_ForwardInputOrAllocateOutput(
TF_OpKernelContext* context, const int* candidate_input_indices,
int num_candidate_input_indices, int output_index,
const int64_t* output_dims, int output_num_dims, int* forwarded_input,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
tensorflow::gtl::ArraySlice<int> input_indices_array(
candidate_input_indices, num_candidate_input_indices);
tensorflow::gtl::ArraySlice<const int64_t> output_dimarray(
reinterpret_cast<const int64_t*>(output_dims), output_num_dims);
tensorflow::Tensor* output_tensor_pointer;
absl::Status s = cc_ctx->forward_input_or_allocate_output(
input_indices_array, output_index,
tensorflow::TensorShape(output_dimarray), &output_tensor_pointer,
forwarded_input);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
TF_Tensor* tf_tensor_output = TF_TensorFromTensor(*output_tensor_pointer, &s);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
return tf_tensor_output;
}
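// Allocates a temporary tensor owned by the op. If alloc_attrs is provided its
// struct_size must be set, and on_host requests host-memory allocation.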
TF_Tensor* TF_AllocateTemp(TF_OpKernelContext* context, TF_DataType dtype,
const int64_t* dims, int num_dims,
TF_AllocatorAttributes* alloc_attrs,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
TF_SetStatus(status, TF_OK, "");
tensorflow::gtl::ArraySlice<const int64_t> dimarray(
reinterpret_cast<const int64_t*>(dims), num_dims);
if (alloc_attrs && !alloc_attrs->struct_size) {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
"TF_AllocatorAttributes struct "
"size member must be set to TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE");
return nullptr;
}
tensorflow::AllocatorAttributes allocator_attr;
if (alloc_attrs && alloc_attrs->on_host) {
allocator_attr.set_on_host(true);
}
absl::Status s;
tensorflow::Tensor tensor;
s = cc_ctx->allocate_temp(static_cast<tensorflow::DataType>(dtype),
tensorflow::TensorShape(dimarray), &tensor,
allocator_attr);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
TF_Tensor* tf_tensor;
tf_tensor = TF_TensorFromTensor(tensor, &s);
if (!s.ok()) {
::tensorflow::Set_TF_Status_from_Status(status, s);
return nullptr;
}
return tf_tensor;
}
void TF_IncNumDeferredOps(TF_OpKernelContext* context) {
tensorflow::OpKernelContext* cc_ctx =
reinterpret_cast<::tensorflow::OpKernelContext*>(context);
cc_ctx->inc_num_deferred_ops_function()();
}
void TF_DecNumDeferredOps(TF_OpKernelContext* context) {
tensorflow::OpKernelContext* cc_ctx =
reinterpret_cast<::tensorflow::OpKernelContext*>(context);
cc_ctx->dec_num_deferred_ops_function()();
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/c/kernels.h"
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_format.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
struct MyCustomKernel {
bool created;
bool compute_called;
};
static bool delete_called = false;
static bool async_kernel_done = false;
static void* MyCreateFunc(TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
TF_DataType type;
TF_Status* status = TF_NewStatus();
TF_OpKernelConstruction_GetAttrType(ctx, "SomeDataTypeAttr", &type, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
EXPECT_EQ(TF_FLOAT, type);
TF_DeleteStatus(status);
TF_StringView name_string_view = TF_OpKernelConstruction_GetName(ctx);
std::string node_name = "SomeNodeName";
std::string candidate_node_name =
std::string(name_string_view.data, name_string_view.len);
EXPECT_EQ(node_name, candidate_node_name);
return s;
}
static void MyComputeFunc(void* kernel, TF_OpKernelContext* ctx) {
struct MyCustomKernel* s = static_cast<struct MyCustomKernel*>(kernel);
s->compute_called = true;
if (ctx != nullptr) {
EXPECT_EQ(43, TF_StepId(ctx));
}
}
static void MyAsyncComputeFunc(void* kernel, TF_OpKernelContext* ctx,
TF_AsyncOpKernelDoneCallback* done) {
struct MyCustomKernel* s = static_cast<struct MyCustomKernel*>(kernel);
TF_RunAsyncOpKernelDoneCallback(done);
s->compute_called = true;
if (ctx != nullptr) {
EXPECT_EQ(43, TF_StepId(ctx));
}
}
static void MyDeleteFunc(void* kernel) {
struct MyCustomKernel* s = static_cast<struct MyCustomKernel*>(kernel);
EXPECT_TRUE(s->created);
EXPECT_TRUE(s->compute_called);
delete_called = true;
delete s;
}
namespace tensorflow {
Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst);
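// Builds a NodeDef with two inputs and a DT_FLOAT "SomeDataTypeAttr" attribute
// and instantiates the kernel registered for the given op/device pair.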
static std::unique_ptr<OpKernel> GetFakeKernel(const char* device_name,
const char* op_name,
const char* node_name,
Status* status) {
NodeDef def;
def.set_op(op_name);
def.set_name(node_name);
def.set_device(device_name);
def.add_input("input1");
def.add_input("input2");
AttrValue v;
v.set_type(DataType::DT_FLOAT);
(*def.mutable_attr())["SomeDataTypeAttr"] = v;
return CreateOpKernel(DeviceType(device_name), nullptr, nullptr, def, 1,
status);
}
static std::unique_ptr<OpKernel> GetFakeKernel2(const char* device_name,
const char* op_name,
const char* node_name,
Status* status) {
NodeDef def;
def.set_op(op_name);
def.set_name(node_name);
def.set_device(device_name);
def.add_input("input1");
def.add_input("input2");
def.add_input("input3");
def.add_input("input3");
def.add_input("input3");
AttrValue v0;
v0.set_type(DataType::DT_INT32);
v0.set_i(3);
(*def.mutable_attr())["NumInput3"] = v0;
AttrValue v1;
v1.set_type(DataType::DT_FLOAT);
(*def.mutable_attr())["SomeDataTypeAttr"] = v1;
return CreateOpKernel(DeviceType(device_name), nullptr, nullptr, def, 1,
status);
}
TEST(TestKernel, TestRegisterKernelBuilder) {
const char* node_name = "SomeNodeName";
const char* op_name = "FooOp";
const char* device_name = "FakeDeviceName1";
REGISTER_OP(op_name)
.Input("input1: double")
.Input("input2: uint8")
.Output("output1: uint8")
.Attr("SomeDataTypeAttr: type");
TF_KernelBuilder* builder = TF_NewKernelBuilder(
op_name, device_name, &MyCreateFunc, &MyComputeFunc, &MyDeleteFunc);
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* buf = TF_GetRegisteredKernelsForOp(op_name, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
KernelList list;
list.ParseFromArray(buf->data, buf->length);
ASSERT_EQ(1, list.kernel_size());
ASSERT_EQ(device_name, list.kernel(0).device_type());
TF_DeleteBuffer(buf);
TF_DeleteStatus(status);
}
{
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
kernel->Compute(nullptr);
}
ASSERT_TRUE(delete_called);
}
TEST(TestKernel, TF_RegisterKernelBuilderWithKernelDef) {
const char* node_name = "SomeNodeName";
const char* op_name = "FooOp1";
const char* device_name = "FakeDeviceName2";
REGISTER_OP(op_name)
.Input("input1: double")
.Input("input2: uint8")
.Output("output1: uint8")
.Attr("SomeDataTypeAttr: type");
TF_KernelBuilder* builder = TF_NewKernelBuilder(
op_name, device_name, &MyCreateFunc, &MyComputeFunc, &MyDeleteFunc);
KernelDef kernel_def;
kernel_def.set_op(op_name);
kernel_def.set_device_type(device_name);
std::string kernel_def_str = kernel_def.SerializePartialAsString();
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilderWithKernelDef(kernel_def_str.data(), node_name,
builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* buf = TF_GetRegisteredKernelsForOp(op_name, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
KernelList list;
list.ParseFromArray(buf->data, buf->length);
ASSERT_EQ(1, list.kernel_size());
ASSERT_EQ(device_name, list.kernel(0).device_type());
TF_DeleteBuffer(buf);
TF_DeleteStatus(status);
}
{
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
kernel->Compute(nullptr);
}
ASSERT_TRUE(delete_called);
}
TEST(TestKernel, TestRegisterAsyncKernelBuilder) {
const char* node_name = "SomeNodeName";
const char* op_name = "AsyncFooOp";
const char* device_name = "FakeDeviceName1";
REGISTER_OP(op_name)
.Input("input1: double")
.Input("input2: uint8")
.Output("output1: uint8")
.Attr("SomeDataTypeAttr: type");
TF_KernelBuilder* builder = TF_NewAsyncKernelBuilder(
op_name, device_name, &MyCreateFunc, &MyAsyncComputeFunc, &MyDeleteFunc);
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* buf = TF_GetRegisteredKernelsForOp(op_name, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
KernelList list;
list.ParseFromArray(buf->data, buf->length);
ASSERT_EQ(1, list.kernel_size());
ASSERT_EQ(device_name, list.kernel(0).device_type());
TF_DeleteBuffer(buf);
TF_DeleteStatus(status);
}
{
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
auto done = []() { async_kernel_done = true; };
down_cast<AsyncOpKernel*>(kernel.get())->ComputeAsync(nullptr, done);
}
ASSERT_TRUE(async_kernel_done);
ASSERT_TRUE(delete_called);
}
#define ATTR_TEST_REGISTER_OP(name, type) \
REGISTER_OP("TestKernelAttr" #name) \
.Attr("Attr: " #type) \
.SetShapeFn(tensorflow::shape_inference::UnknownShape); \
REGISTER_OP("TestKernelAttr" #name "List") \
.Attr("Attr: list(" #type ")") \
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
ATTR_TEST_REGISTER_OP(String, string);
ATTR_TEST_REGISTER_OP(Int, int);
ATTR_TEST_REGISTER_OP(Float, float);
ATTR_TEST_REGISTER_OP(Bool, bool);
ATTR_TEST_REGISTER_OP(Type, type);
ATTR_TEST_REGISTER_OP(Tensor, tensor);
#undef ATTR_TEST_REGISTER_OP
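// Asserts that TF_OpKernelConstruction_GetAttrSize reports the expected list
// size and total size for the named attribute of the kernel being constructed.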
#define EXPECT_TF_SIZE(attr_name, expected_list_size, expected_total_size) \
do { \
int32_t list_size, total_size; \
TF_OpKernelConstruction_GetAttrSize(ctx, attr_name, &list_size, \
&total_size, status); \
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); \
EXPECT_EQ(expected_list_size, list_size); \
EXPECT_EQ(expected_total_size, total_size); \
} while (0)
typedef void* (*MyCreateFuncWithAttr)(TF_OpKernelConstruction*);
class TestKernelAttr : public ::testing::Test {
public:
TestKernelAttr() {}
~TestKernelAttr() override {}
std::unique_ptr<OpKernel> GetFakeKernelWithAttr(const char* op_name,
AttrValue v, Status* status) {
NodeDef def;
def.set_op(op_name);
def.set_name("FakeNode");
def.set_device("FakeDevice");
(*def.mutable_attr())["Attr"] = v;
return CreateOpKernel(DeviceType("FakeDevice"), nullptr, nullptr, def, 1,
status);
}
void CreateAndCallKernelWithAttr(MyCreateFuncWithAttr MyCreateFuncAttr,
const char* op_name, AttrValue& v) {
TF_KernelBuilder* builder = TF_NewKernelBuilder(
op_name, "FakeDevice", MyCreateFuncAttr, &MyComputeFunc, &MyDeleteFunc);
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder("FakeNode", builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernelWithAttr(op_name, v, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
kernel->Compute(nullptr);
ASSERT_TRUE(delete_called);
}
};
TEST_F(TestKernelAttr, GetNodeDef) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
TF_Status* status = TF_NewStatus();
TF_Buffer* node_def_buf = TF_OpKernelConstruction_GetNodeDef(ctx, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
NodeDef node_def;
node_def.ParseFromArray(node_def_buf->data, node_def_buf->length);
EXPECT_EQ(node_def.op(), "TestKernelAttrGetNodeDef");
EXPECT_EQ(node_def.name(), "FakeNode");
EXPECT_EQ(node_def.device(), "FakeDevice");
EXPECT_EQ(node_def.attr_size(), 1);
const ::tensorflow::AttrValue& value = node_def.attr().at("Attr");
EXPECT_TRUE(value.value_case() == ::tensorflow::AttrValue::ValueCase::kI);
EXPECT_EQ(value.i(), 1234);
TF_DeleteBuffer(node_def_buf);
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
REGISTER_OP("TestKernelAttrGetNodeDef")
.Attr("Attr: int")
.SetShapeFn(tensorflow::shape_inference::UnknownShape);
AttrValue v;
v.set_i(1234);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrGetNodeDef", v);
}
TEST_F(TestKernelAttr, String) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
std::unique_ptr<char[]> val(new char[5]);
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/5);
TF_OpKernelConstruction_GetAttrString(ctx, "Attr", val.get(),
5, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ("bunny", string(static_cast<const char*>(val.get()), 5));
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
v.set_s("bunny");
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrString", v);
}
TEST_F(TestKernelAttr, StringList) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
std::vector<string> list = {"bugs", "bunny", "duck"};
int list_total_size = 0;
for (const auto& s : list) {
list_total_size += s.size();
}
TF_Status* status = TF_NewStatus();
std::unique_ptr<char*[]> values(new char*[list.size()]);
std::unique_ptr<size_t[]> lens(new size_t[list.size()]);
std::unique_ptr<char[]> storage(new char[list_total_size]);
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list.size(),
                   /*expected_total_size=*/list_total_size);
TF_OpKernelConstruction_GetAttrStringList(
ctx, "Attr", values.get(), lens.get(), list.size(), storage.get(),
list_total_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < list.size(); ++i) {
EXPECT_EQ(list[i].size(), lens[i]) << i;
EXPECT_EQ(list[i], string(static_cast<const char*>(values[i]), lens[i]))
<< i;
}
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
std::string attr_in[] = {"bugs", "bunny", "duck"};
SetAttrValue(absl::Span<const std::string>(attr_in, 3), &v);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrStringList", v);
}
TEST_F(TestKernelAttr, Tensor) {
struct TensorProtoHelpers {
public:
static ::tensorflow::TensorProto GenerateTensorProto() {
::tensorflow::TensorProto tensor_proto;
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(3);
tensor_proto.set_dtype(DT_INT32);
tensor_proto.add_int_val(1);
tensor_proto.add_int_val(2);
tensor_proto.add_int_val(3);
tensor_proto.add_int_val(4);
tensor_proto.add_int_val(5);
tensor_proto.add_int_val(6);
return tensor_proto;
}
};
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
TF_Tensor* val;
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrTensor(ctx, "Attr", &val, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
::tensorflow::Tensor expected_tensor;
EXPECT_TRUE(
expected_tensor.FromProto(TensorProtoHelpers::GenerateTensorProto()));
::tensorflow::Tensor actual_tensor;
EXPECT_TRUE(TF_TensorToTensor(val, &actual_tensor).ok());
EXPECT_EQ(actual_tensor.tensor_data(), expected_tensor.tensor_data());
EXPECT_EQ(actual_tensor.shape(), expected_tensor.shape());
EXPECT_EQ(actual_tensor.dtype(), expected_tensor.dtype());
TF_DeleteStatus(status);
TF_DeleteTensor(val);
return static_cast<void*>(s);
};
AttrValue v;
::tensorflow::TensorProto* tensor_proto = v.mutable_tensor();
*tensor_proto = TensorProtoHelpers::GenerateTensorProto();
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrTensor", v);
}
TEST_F(TestKernelAttr, TensorList) {
struct TensorProtoHelpers {
public:
static ::tensorflow::TensorProto GenerateTensorProto1() {
::tensorflow::TensorProto tensor_proto;
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
tensor_proto.set_dtype(DT_INT32);
tensor_proto.add_int_val(1);
tensor_proto.add_int_val(2);
tensor_proto.add_int_val(3);
tensor_proto.add_int_val(4);
return tensor_proto;
}
static ::tensorflow::TensorProto GenerateTensorProto2() {
::tensorflow::TensorProto tensor_proto;
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(3);
tensor_proto.set_dtype(DT_FLOAT);
tensor_proto.add_float_val(5.0f);
tensor_proto.add_float_val(6.0f);
tensor_proto.add_float_val(7.0f);
tensor_proto.add_float_val(8.0f);
tensor_proto.add_float_val(9.0f);
tensor_proto.add_float_val(10.0f);
return tensor_proto;
}
};
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
const size_t list_size = 2;
TF_Tensor* values[list_size];
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list_size,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrTensorList(ctx, "Attr", values, list_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
::tensorflow::Tensor expected_tensor1;
EXPECT_TRUE(
expected_tensor1.FromProto(TensorProtoHelpers::GenerateTensorProto1()));
::tensorflow::Tensor actual_tensor1;
EXPECT_TRUE(TF_TensorToTensor(values[0], &actual_tensor1).ok());
EXPECT_EQ(actual_tensor1.tensor_data(), expected_tensor1.tensor_data());
EXPECT_EQ(actual_tensor1.shape(), expected_tensor1.shape());
EXPECT_EQ(actual_tensor1.dtype(), expected_tensor1.dtype());
::tensorflow::Tensor expected_tensor2;
EXPECT_TRUE(
expected_tensor2.FromProto(TensorProtoHelpers::GenerateTensorProto2()));
::tensorflow::Tensor actual_tensor2;
EXPECT_TRUE(TF_TensorToTensor(values[1], &actual_tensor2).ok());
EXPECT_EQ(actual_tensor2.tensor_data(), expected_tensor2.tensor_data());
EXPECT_EQ(actual_tensor2.shape(), expected_tensor2.shape());
EXPECT_EQ(actual_tensor2.dtype(), expected_tensor2.dtype());
TF_DeleteStatus(status);
TF_DeleteTensor(values[0]);
TF_DeleteTensor(values[1]);
return static_cast<void*>(s);
};
AttrValue v;
::tensorflow::TensorProto* tensor_proto1 = v.mutable_list()->add_tensor();
*tensor_proto1 = TensorProtoHelpers::GenerateTensorProto1();
::tensorflow::TensorProto* tensor_proto2 = v.mutable_list()->add_tensor();
*tensor_proto2 = TensorProtoHelpers::GenerateTensorProto2();
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrTensorList", v);
}
TEST_F(TestKernelAttr, Int) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
int64_t val;
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrInt64(ctx, "Attr", &val, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(1234, val);
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
v.set_i(1234);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrInt", v);
}
TEST_F(TestKernelAttr, IntList) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
const int64_t list[] = {1, 2, 3, 4};
const size_t list_size = TF_ARRAYSIZE(list);
int64_t values[list_size];
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list_size,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrInt64List(ctx, "Attr", values, list_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_TRUE(
std::equal(std::begin(list), std::end(list), std::begin(values)));
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
int64_t attr_in[] = {1, 2, 3, 4};
SetAttrValue(absl::Span<const int64_t>(attr_in, 4), &v);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrIntList", v);
}
TEST_F(TestKernelAttr, Float) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
float val;
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrFloat(ctx, "Attr", &val, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_FLOAT_EQ(2.718, val);
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
v.set_f(2.718);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrFloat", v);
}
TEST_F(TestKernelAttr, FloatList) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
const float list[] = {1.414, 2.718, 3.1415};
const size_t list_size = TF_ARRAYSIZE(list);
float values[list_size];
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list_size,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrFloatList(ctx, "Attr", values, list_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_TRUE(
std::equal(std::begin(list), std::end(list), std::begin(values)));
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
float attr_in[] = {1.414, 2.718, 3.1415};
SetAttrValue(absl::Span<const float>(attr_in, 3), &v);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrFloatList", v);
}
TEST_F(TestKernelAttr, Bool) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
unsigned char val;
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrBool(ctx, "Attr", &val, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(1, val);
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
v.set_b(true);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrBool", v);
}
TEST_F(TestKernelAttr, BoolList) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
const unsigned char list[] = {1, 0, 1, 0};
const size_t list_size = TF_ARRAYSIZE(list);
unsigned char values[list_size];
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list_size,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrBoolList(ctx, "Attr", values, list_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_TRUE(
std::equal(std::begin(list), std::end(list), std::begin(values)));
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
bool attr_in[] = {true, false, true, false};
SetAttrValue(absl::Span<const bool>(attr_in, 4), &v);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrBoolList", v);
}
TEST_F(TestKernelAttr, Type) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
TF_DataType val;
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/-1,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrType(ctx, "Attr", &val, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(TF_FLOAT, val);
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
v.set_type(DT_FLOAT);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrType", v);
}
TEST_F(TestKernelAttr, TypeList) {
auto my_create_func = [](TF_OpKernelConstruction* ctx) {
struct MyCustomKernel* s = new struct MyCustomKernel;
s->created = true;
s->compute_called = false;
const TF_DataType list[] = {TF_FLOAT, TF_DOUBLE, TF_HALF, TF_COMPLEX128};
const size_t list_size = TF_ARRAYSIZE(list);
TF_DataType values[list_size];
TF_Status* status = TF_NewStatus();
    EXPECT_TF_SIZE(/*attr_name=*/"Attr", /*expected_list_size=*/list_size,
                   /*expected_total_size=*/-1);
TF_OpKernelConstruction_GetAttrTypeList(ctx, "Attr", values, list_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_TRUE(
std::equal(std::begin(list), std::end(list), std::begin(values)));
TF_DeleteStatus(status);
return static_cast<void*>(s);
};
AttrValue v;
DataType attr_in[] = {DT_FLOAT, DT_DOUBLE, DT_HALF, DT_COMPLEX128};
SetAttrValue(absl::Span<const DataType>(attr_in, 4), &v);
CreateAndCallKernelWithAttr(my_create_func, "TestKernelAttrTypeList", v);
}
#undef EXPECT_TF_SIZE
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {}
  Allocator* GetAllocator(AllocatorAttributes /*attr*/) override {
return cpu_allocator();
}
};
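// Verifies TF_NumInputs/TF_NumOutputs, out-of-range handling in TF_GetInput
// and TF_SetOutput, and TF_ExpectedOutputDataType for a C-API kernel.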
TEST(TestKernel, TestInputAndOutputCount) {
const char* node_name = "InputOutputCounterKernel";
const char* op_name = "BarOp";
const char* device_name = "FakeDeviceName2";
REGISTER_OP(op_name)
.Input("input1: double")
.Input("input2: uint8")
.Output("output1: uint8")
.Attr("SomeDataTypeAttr: type");
static int num_inputs = 0;
static int num_outputs = 0;
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
num_inputs = TF_NumInputs(ctx);
num_outputs = TF_NumOutputs(ctx);
TF_Tensor* input = nullptr;
TF_Status* s = TF_NewStatus();
TF_GetInput(ctx, 0, &input, s);
EXPECT_EQ(TF_OK, TF_GetCode(s)) << "Failed to get input: " << TF_Message(s);
EXPECT_EQ(123, *static_cast<tensorflow::uint8*>(TF_TensorData(input)));
TF_GetInput(ctx, -1, &input, s);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s));
TF_GetInput(ctx, 3, &input, s);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s));
TF_SetOutput(ctx, 0, input, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
TF_SetOutput(ctx, 24, input, s);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s));
EXPECT_EQ(TF_UINT8, TF_ExpectedOutputDataType(ctx, 0));
EXPECT_DEATH({ TF_ExpectedOutputDataType(ctx, 1); },
"Check failed: i < cc_ctx->num_outputs");
EXPECT_DEATH({ TF_ExpectedOutputDataType(ctx, -1); },
"Check failed: i >= 0");
TF_DeleteStatus(s);
if (input != nullptr) {
TF_DeleteTensor(input);
}
};
TF_KernelBuilder* builder = TF_NewKernelBuilder(op_name, device_name, nullptr,
my_compute_func, nullptr);
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
{
OpKernelContext::Params p;
DummyDevice dummy_device(nullptr);
p.device = &dummy_device;
p.step_id = 43;
Tensor t(tensorflow::uint8(123));
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.emplace_back(&t);
inputs.emplace_back();
p.inputs = inputs;
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
p.op_kernel = kernel.get();
OpKernelContext ctx(&p);
kernel->Compute(&ctx);
ASSERT_EQ(2, num_inputs);
ASSERT_EQ(1, num_outputs);
ASSERT_EQ(123, ctx.mutable_output(0)->scalar<tensorflow::uint8>()());
}
}
TEST(TestKernel, DeleteKernelBuilderIsOkOnNull) {
TF_DeleteKernelBuilder(nullptr);
}
std::string ExpectedString(const char* type) {
const auto format_str = R"str(kernel {
op: "TypeOp%s"
device_type: "FakeDeviceName1"
constraint {
name: "T"
allowed_values {
list {
type: %s
}
}
}
}
)str";
return absl::StrFormat(format_str, type, type);
}
#define TEST_KERNEL_TYPE_CONSTRAINT(tf_type, dtype) \
TEST(TestKernel, TestTypeConstraint##tf_type) { \
const char* node_name = "SomeNodeName"; \
const char* op_name = "TypeOp" #dtype; \
const char* device_name = "FakeDeviceName1"; \
\
REGISTER_OP(op_name) \
.Input("input1: double") \
.Input("input2: uint8") \
.Output("output1: uint8") \
.Attr("T: type"); \
\
TF_KernelBuilder* builder = TF_NewKernelBuilder( \
op_name, device_name, &MyCreateFunc, &MyComputeFunc, &MyDeleteFunc); \
TF_Status* status = TF_NewStatus(); \
TF_KernelBuilder_TypeConstraint(builder, "T", TF_DataType::tf_type, \
status); \
EXPECT_EQ(TF_OK, TF_GetCode(status)); \
TF_RegisterKernelBuilder(node_name, builder, status); \
EXPECT_EQ(TF_OK, TF_GetCode(status)); \
\
TF_Buffer* buf = TF_GetRegisteredKernelsForOp(op_name, status); \
EXPECT_EQ(TF_OK, TF_GetCode(status)); \
KernelList list; \
list.ParseFromArray(buf->data, buf->length); \
KernelList expected_proto; \
protobuf::TextFormat::ParseFromString(ExpectedString(#dtype), \
&expected_proto); \
ASSERT_EQ(expected_proto.DebugString(), list.DebugString()); \
\
TF_DeleteBuffer(buf); \
TF_DeleteStatus(status); \
TF_DeleteKernelBuilder(builder); \
ASSERT_TRUE(delete_called); \
}
TEST_KERNEL_TYPE_CONSTRAINT(TF_HALF, DT_HALF);
TEST_KERNEL_TYPE_CONSTRAINT(TF_BFLOAT16, DT_BFLOAT16);
TEST_KERNEL_TYPE_CONSTRAINT(TF_FLOAT, DT_FLOAT);
TEST_KERNEL_TYPE_CONSTRAINT(TF_DOUBLE, DT_DOUBLE);
TEST_KERNEL_TYPE_CONSTRAINT(TF_UINT64, DT_UINT64);
TEST_KERNEL_TYPE_CONSTRAINT(TF_UINT32, DT_UINT32);
TEST_KERNEL_TYPE_CONSTRAINT(TF_UINT16, DT_UINT16);
TEST_KERNEL_TYPE_CONSTRAINT(TF_UINT8, DT_UINT8);
TEST_KERNEL_TYPE_CONSTRAINT(TF_INT8, DT_INT8);
TEST_KERNEL_TYPE_CONSTRAINT(TF_INT32, DT_INT32);
TEST_KERNEL_TYPE_CONSTRAINT(TF_COMPLEX64, DT_COMPLEX64);
TEST_KERNEL_TYPE_CONSTRAINT(TF_COMPLEX128, DT_COMPLEX128);
TEST_KERNEL_TYPE_CONSTRAINT(TF_QINT8, DT_QINT8);
TEST_KERNEL_TYPE_CONSTRAINT(TF_QUINT8, DT_QUINT8);
TEST_KERNEL_TYPE_CONSTRAINT(TF_QINT32, DT_QINT32);
TEST_KERNEL_TYPE_CONSTRAINT(TF_QINT16, DT_QINT16);
TEST_KERNEL_TYPE_CONSTRAINT(TF_QUINT16, DT_QUINT16);
TEST(TestKernel, TestHostMemory) {
const char* node_name = "SomeNodeName";
const char* op_name = "HostMemoryOp";
const char* device_name = "FakeDeviceName1";
REGISTER_OP(op_name)
.Input("input1: double")
.Input("input2: uint8")
.Output("output1: uint8")
.Output("output2: uint8")
.Attr("T: type");
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
MyComputeFunc(kernel, ctx);
TF_Status* status = TF_NewStatus();
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(false, TF_IsHostMemoryInput(ctx, 0, status));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(true, TF_IsHostMemoryInput(ctx, 1, status));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(true, TF_IsHostMemoryOutput(ctx, 0, status));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
EXPECT_EQ(false, TF_IsHostMemoryOutput(ctx, 1, status));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_IsHostMemoryInput(ctx, -1, status);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_IsHostMemoryInput(ctx, 2, status);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_IsHostMemoryOutput(ctx, -1, status);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_IsHostMemoryOutput(ctx, 2, status);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(status));
TF_DeleteStatus(status);
};
TF_KernelBuilder* builder = TF_NewKernelBuilder(
op_name, device_name, &MyCreateFunc, my_compute_func, &MyDeleteFunc);
TF_KernelBuilder_HostMemory(builder, "input2");
TF_KernelBuilder_HostMemory(builder, "output1");
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* buf = TF_GetRegisteredKernelsForOp(op_name, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
KernelList list;
list.ParseFromArray(buf->data, buf->length);
KernelList expected_proto;
protobuf::TextFormat::ParseFromString(
R"str(kernel {
op: "HostMemoryOp"
device_type: "FakeDeviceName1"
host_memory_arg: "input2"
host_memory_arg: "output1"
}
)str",
&expected_proto);
ASSERT_EQ(list.DebugString(), expected_proto.DebugString());
TF_DeleteBuffer(buf);
TF_DeleteStatus(status);
TF_DeleteKernelBuilder(builder);
ASSERT_TRUE(delete_called);
}
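// Test fixture that registers a C-API kernel for the given op and runs it on
// GPU when built with CUDA/ROCm support, falling back to CPU otherwise.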
class DeviceKernelOpTest : public OpsTestBase {
protected:
void SetupOp(const char* op_name, const char* node_name,
void (*compute_func)(void*, TF_OpKernelContext*)) {
TF_KernelBuilder* builder = TF_NewKernelBuilder(
op_name, device_name_, nullptr, compute_func, nullptr);
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
std::unique_ptr<Device> device(
DeviceFactory::NewDevice(device_name_, {}, "/job:a/replica:0/task:0"));
OpsTestBase::SetDevice(DEVICE_GPU, std::move(device));
#endif
TF_ASSERT_OK(NodeDefBuilder(op_name, op_name).Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
const char* device_name_ = tensorflow::DEVICE_GPU;
#else
const char* device_name_ = tensorflow::DEVICE_CPU;
#endif
};
void validate_tensor(TF_Tensor* tensor, int64_t* dims, int64_t num_dims,
TF_DataType dtype);
template <typename T>
void set_tensor_data(TF_Tensor* tensor, T* values, size_t tensor_size_bytes,
TF_OpKernelContext* ctx);
REGISTER_OP("StreamOp").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestStream) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
SP_Stream stream = TF_GetStream(ctx, s);
EXPECT_EQ(stream, nullptr);
EXPECT_NE(TF_OK, TF_GetCode(s));
TF_DeleteStatus(s);
};
SetupOp("StreamOp", "StreamOp", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
}
REGISTER_OP("AllocateOutputOp1").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateOutputSizeOne) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim = 1;
size_t tensor_size_bytes = TF_DataTypeSize(TF_FLOAT);
    TF_Tensor* output = TF_AllocateOutput(
        ctx, /*index=*/0, /*dtype=*/TF_FLOAT, /*dims=*/&dim,
        /*num_dims=*/1, /*len=*/tensor_size_bytes, s);
validate_tensor(output, &dim, 1, TF_FLOAT);
float values[1] = {3.0f};
set_tensor_data<float>(output, values, tensor_size_bytes, ctx);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateOutputOp1", "AllocateOutput1", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [1] values: 3>",
output->DebugString(100));
}
REGISTER_OP("AllocateOutputOp0").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateEmptyOutput) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim = 0;
    TF_Tensor* output = TF_AllocateOutput(
        ctx, /*index=*/0, /*dtype=*/TF_FLOAT, /*dims=*/&dim,
        /*num_dims=*/1, /*len=*/0, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
validate_tensor(output, &dim, 1, TF_FLOAT);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateOutputOp0", "AllocateOutput0", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [0] values: >",
output->DebugString(100));
}
REGISTER_OP("AllocateOutputOp2x3").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateOutputSize2x3) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim[2] = {2, 3};
size_t tensor_size_bytes = TF_DataTypeSize(TF_FLOAT) * 6;
    TF_Tensor* output = TF_AllocateOutput(
        ctx, /*index=*/0, /*dtype=*/TF_FLOAT, /*dims=*/dim,
        /*num_dims=*/2, /*len=*/tensor_size_bytes, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
validate_tensor(output, dim, 2, TF_FLOAT);
float values[6] = {1, 2, 3, 4, 5, 6};
set_tensor_data<float>(output, values, tensor_size_bytes, ctx);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateOutputOp2x3", "AllocateOutput2x3", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [2,3] values: [1 2 3][4 5 6]>",
output->DebugString(100));
}
REGISTER_OP("AllocateTempOp1").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateTempSizeOne) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim = 1;
TF_AllocatorAttributes alloc_attrs;
alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
alloc_attrs.on_host = 0;
#else
alloc_attrs.on_host = 1;
#endif
    TF_Tensor* output = TF_AllocateTemp(
        ctx, /*dtype=*/TF_FLOAT, /*dims=*/&dim,
        /*num_dims=*/1, &alloc_attrs, s);
size_t tensor_size_bytes = TF_DataTypeSize(TF_FLOAT);
EXPECT_EQ(TF_OK, TF_GetCode(s));
validate_tensor(output, &dim, 1, TF_FLOAT);
float values[1] = {3.0f};
set_tensor_data<float>(output, values, tensor_size_bytes, ctx);
TF_SetOutput(ctx, 0, output, s);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateTempOp1", "AllocateTemp1", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [1] values: 3>",
output->DebugString(100));
}
REGISTER_OP("AllocateTempOp0").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateTempEmpty) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim = 0;
TF_AllocatorAttributes alloc_attrs;
alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
alloc_attrs.on_host = 0;
#else
alloc_attrs.on_host = 1;
#endif
    TF_Tensor* output = TF_AllocateTemp(
        ctx, /*dtype=*/TF_FLOAT, /*dims=*/&dim,
        /*num_dims=*/1, &alloc_attrs, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
validate_tensor(output, &dim, 1, TF_FLOAT);
TF_SetOutput(ctx, 0, output, s);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateTempOp0", "AllocateTemp0", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [0] values: >",
output->DebugString(100));
}
REGISTER_OP("AllocateTempOp2x3").Output("output1: float");
TEST_F(DeviceKernelOpTest, TestAllocateTempSize2x3) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
size_t tensor_size_bytes = 6 * TF_DataTypeSize(TF_FLOAT);
int64_t dim[2] = {2, 3};
TF_AllocatorAttributes alloc_attrs;
alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
alloc_attrs.on_host = 0;
#else
alloc_attrs.on_host = 1;
#endif
    TF_Tensor* output = TF_AllocateTemp(
        ctx, /*dtype=*/TF_FLOAT, /*dims=*/dim,
        /*num_dims=*/2, &alloc_attrs, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
validate_tensor(output, dim, 2, TF_FLOAT);
float values[6] = {1, 2, 3, 4, 5, 6};
set_tensor_data<float>(output, values, tensor_size_bytes, ctx);
TF_SetOutput(ctx, 0, output, s);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
SetupOp("AllocateTempOp2x3", "AllocateTempOp2x3", my_compute_func);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ("Tensor<type: float shape: [2,3] values: [1 2 3][4 5 6]>",
output->DebugString(100));
}
REGISTER_OP("DoNothingOp")
.Input("input1: float")
.Input("input2: float")
.Attr("NumInput3: int >= 0")
.Input("input3: NumInput3 * float")
.Output("output1: float")
.Attr("SomeDataTypeAttr: type");
TEST_F(DeviceKernelOpTest, TestGetKernelInfo) {
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int64_t dim[1] = {1};
TF_AllocatorAttributes alloc_attrs;
alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
alloc_attrs.on_host = 0;
#else
alloc_attrs.on_host = 1;
#endif
TF_StringView sv = TF_GetOpKernelName(ctx);
EXPECT_STREQ(sv.data, "TestGetKernelInfoNode");
sv = TF_GetOpKernelRequestedInput(ctx, 0);
EXPECT_STREQ(sv.data, "input1");
sv = TF_GetOpKernelRequestedInput(ctx, 1);
EXPECT_STREQ(sv.data, "input2");
TF_InputRange_Args args;
args.status = s;
TF_InputRange(ctx, "input3", &args);
EXPECT_EQ(TF_OK, TF_GetCode(s));
EXPECT_EQ(args.start, 2);
EXPECT_EQ(args.stop, 5);
    TF_Tensor* output = TF_AllocateTemp(
        ctx, /*dtype=*/TF_FLOAT, /*dims=*/dim,
        /*num_dims=*/1, &alloc_attrs, s);
TF_SetOutput(ctx, 0, output, s);
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
const char* node_name = "TestGetKernelInfoNode";
const char* op_name = "DoNothingOp";
const char* device_name = "FakeDeviceName";
TF_KernelBuilder* builder = TF_NewKernelBuilder(op_name, device_name, nullptr,
my_compute_func, nullptr);
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
{
OpKernelContext::Params p;
DummyDevice dummy_device(nullptr);
p.device = &dummy_device;
AllocatorAttributes alloc_attrs;
p.output_attr_array = &alloc_attrs;
absl::InlinedVector<TensorValue, 4UL> inputs;
Tensor t0(1.0f);
Tensor t1(2.0f);
Tensor t2_0(2.0f);
Tensor t2_1(2.1f);
Tensor t2_2(2.2f);
inputs.emplace_back(&t0);
inputs.emplace_back(&t1);
inputs.emplace_back(&t2_0);
inputs.emplace_back(&t2_1);
inputs.emplace_back(&t2_2);
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel2(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
p.op_kernel = kernel.get();
p.inputs = inputs;
OpKernelContext ctx(&p);
kernel->Compute(&ctx);
}
}
TEST_F(DeviceKernelOpTest, TestForwardInputOrAllocateOutput) {
const char* node_name = "TestForwardInputOrAllocateOutputKernel";
const char* op_name = "BazOp";
const char* device_name = "FakeDeviceName";
REGISTER_OP(op_name)
.Input("input1: float")
.Input("input2: float")
.Output("output1: float")
.Attr("SomeDataTypeAttr: type");
auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
TF_Status* s = TF_NewStatus();
int candidate_input_indices[1] = {0};
int forwarded_input;
int64_t output_dims[1] = {};
    TF_Tensor* output = TF_ForwardInputOrAllocateOutput(
        ctx, candidate_input_indices,
        /*num_candidate_input_indices=*/1,
        /*output_index=*/0, output_dims, /*output_num_dims=*/0,
        &forwarded_input, s);
EXPECT_EQ(TF_OK, TF_GetCode(s));
EXPECT_EQ(forwarded_input, 0);
EXPECT_EQ(TF_FLOAT, TF_TensorType(output));
EXPECT_EQ(0, TF_NumDims(output));
TF_DeleteStatus(s);
TF_DeleteTensor(output);
};
TF_KernelBuilder* builder = TF_NewKernelBuilder(op_name, device_name, nullptr,
my_compute_func, nullptr);
{
TF_Status* status = TF_NewStatus();
TF_RegisterKernelBuilder(node_name, builder, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
{
OpKernelContext::Params p;
DummyDevice dummy_device(nullptr);
p.device = &dummy_device;
AllocatorAttributes alloc_attrs;
p.output_attr_array = &alloc_attrs;
Tensor t(123.0f);
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.emplace_back(&t);
inputs.emplace_back();
p.inputs = inputs;
Status status;
std::unique_ptr<OpKernel> kernel =
GetFakeKernel(device_name, op_name, node_name, &status);
TF_EXPECT_OK(status);
ASSERT_NE(nullptr, kernel.get());
p.op_kernel = kernel.get();
OpKernelContext ctx(&p);
kernel->Compute(&ctx);
ASSERT_EQ(123, ctx.mutable_output(0)->scalar<float>()());
}
}
void validate_tensor(TF_Tensor* tensor, int64_t* dims, int64_t num_dims,
TF_DataType dtype) {
EXPECT_EQ(TF_FLOAT, TF_TensorType(tensor));
EXPECT_EQ(num_dims, TF_NumDims(tensor));
for (int i = 0; i < num_dims; ++i) {
EXPECT_EQ(dims[i], TF_Dim(tensor, i));
}
}
template <typename T>
void set_tensor_data(TF_Tensor* tensor, T* values, size_t tensor_size_bytes,
TF_OpKernelContext* ctx) {
T* data = reinterpret_cast<T*>(TF_TensorData(tensor));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
OpKernelContext* cc_ctx = reinterpret_cast<OpKernelContext*>(ctx);
cc_ctx->eigen_gpu_device().memcpyHostToDevice(data, values,
tensor_size_bytes);
#else
memcpy(data, values, tensor_size_bytes);
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59953132-5105-4aa4-93e8-9e703673d57a | cpp | tensorflow/tensorflow | flat_map_dataset_op | tensorflow/core/kernels/data/flat_map_dataset_op.cc | tensorflow/core/kernels/data/flat_map_dataset_op_test.cc | #include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/flat_map_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const FlatMapDatasetOp::kDatasetType;
constexpr const char* const FlatMapDatasetOp::kInputDataset;
constexpr const char* const FlatMapDatasetOp::kOtherArguments;
constexpr const char* const FlatMapDatasetOp::kFunc;
constexpr const char* const FlatMapDatasetOp::kTarguments;
constexpr const char* const FlatMapDatasetOp::kOutputTypes;
constexpr const char* const FlatMapDatasetOp::kOutputShapes;
constexpr int64_t kMaxRandomIndexingCardinality = 100;
constexpr char kCycleLength[] = "cycle_length";
constexpr char kElementIndex[] = "element_index";
constexpr char kInputsSize[] = "inputs_size";
constexpr char kInputs[] = "inputs";
constexpr char kCurrentElementIteratorUninitialized[] =
"current_element_iterator_uninitialized";
constexpr char kExhausted[] = "exhausted";
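// A Dataset that maps a captured function over each element of its input and
// flattens the resulting datasets into a single stream of elements.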
class FlatMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
output_types_(output_types),
output_shapes_(output_shapes),
random_access_handler_(ctx, input, *captured_func_) {
input_->Ref();
random_indexing_compatible_ = input_->RandomIndexingCompatible();
if (random_indexing_compatible_.ok() &&
input_->Cardinality() > kMaxRandomIndexingCardinality) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("The cardinality of the input to ", type_string(),
" is too large to support global shuffling. It is ",
input_->Cardinality(), ", which is greater than ",
kMaxRandomIndexingCardinality));
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (options.compute_level() <
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) {
return kUnknownCardinality;
}
absl::StatusOr<int64_t> cardinality = random_access_handler_.Cardinality();
if (!cardinality.ok()) {
LOG(ERROR) << "Unable to compute cardinality for dataset "
<< DebugString() << " due to error: " << cardinality.status();
return kUnknownCardinality;
}
return *cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return absl::UnimplementedError(
"Please consider applying maps on each dataset, concatenating them "
"into "
"one dataset and apply global shuffle dataset op onto the "
"dataset to achieve the same result as flat map with global "
"shuffling.");
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {std::make_pair(0, input_graph_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f),
std::make_pair(kTarguments, other_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
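    // Pulls elements from the iterator over the current mapped dataset and,
    // when it is exhausted, fetches the next input element and rebuilds the
    // current-element iterator.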
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper()) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
do {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
TF_RETURN_IF_ERROR(current_element_iterator_->GetNext(
&nested_ctx, out_tensors, &end_of_element));
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
*end_of_sequence = false;
return absl::OkStatus();
}
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
            BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/true));
} while (true);
}
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
mutex_lock l(mu_);
*num_skipped = 0;
while (*num_skipped < num_to_skip) {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
int last_num_skipped;
TF_RETURN_IF_ERROR(current_element_iterator_->Skip(
&nested_ctx, num_to_skip - *num_skipped, &end_of_element,
&last_num_skipped));
*num_skipped += last_num_skipped;
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
if (*num_skipped != num_to_skip) {
return absl::InternalError(absl::StrFormat(
"Expected `num_skipped` and `num_to_skip` to be the same. Got"
" %d(num_skipped) and %d(num_to_skip)",
*num_skipped, num_to_skip));
}
continue;
}
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
            BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/false));
}
*end_of_sequence = false;
return absl::OkStatus();
}
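    // Random-access path used when an index mapper (global shuffling) is set:
    // maps the shuffled element position back to the input dataset that owns
    // it and reads the element from that dataset's iterator.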
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(size_t parent_index,
ctx->index_mapper()(element_count_));
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
absl::StatusOr<int64_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(dataset_index.status());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
next_positions_.resize(dataset_iterators_.size(), 0);
input_element_counts_.resize(dataset_iterators_.size(), 0);
}
IteratorContext::Params params(ctx);
params.index_mapper =
GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index);
IteratorContext global_shuffle_ctx(std::move(params));
TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext(
&global_shuffle_ctx, out_tensors, end_of_sequence));
ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint());
++element_count_;
++input_element_counts_[*dataset_index];
return absl::OkStatus();
}
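    // Returns an index mapper for the `input_dataset_index`-th input dataset:
    // it walks the globally shuffled positions and yields the next one that
    // falls inside that input, converted to an offset local to it.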
IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper,
size_t input_dataset_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::StatusOr<int64_t> cardinality =
dataset()->random_access_handler_.Cardinality();
return [this, parent_index_mapper = std::move(parent_index_mapper),
input_dataset_index, cardinality = std::move(cardinality)](
size_t element_position) -> absl::StatusOr<size_t> {
if (!cardinality.ok() || *cardinality < 0) {
return absl::FailedPreconditionError(
"Global shuffling requires finite cardinalities.");
}
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
while (next_positions_[input_dataset_index] < *cardinality) {
size_t index = next_positions_[input_dataset_index];
if (parent_index_mapper != nullptr) {
TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index));
}
++next_positions_[input_dataset_index];
TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index,
random_access.GetDatasetIndex(index));
if (input_dataset_index == shuffled_dataset_index) {
if (input_dataset_index > 0) {
TF_ASSIGN_OR_RETURN(
int64_t cumulative_cardinality,
random_access.CumulativeCardinality(input_dataset_index - 1));
index -= cumulative_cardinality;
}
return index;
}
}
return *cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args),
          {model::MakeNonTunableParameter(kCycleLength, /*value=*/1)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override
TF_LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kElementIndex, element_index_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kCurrentElementIteratorUninitialized,
static_cast<int64_t>(!current_element_iterator_)));
if (current_element_iterator_ && !ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputsSize, inputs_.size()));
for (int i = 0; i < inputs_.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kInputs, "[", i, "]"), inputs_[i]));
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override
TF_LOCKS_EXCLUDED(mu_) {
if (ctx->restored_element_count().has_value()) {
return RestoreForGlobalShuffle(ctx, reader);
}
mutex_lock l(mu_);
input_impl_.reset();
element_index_ = 0;
current_element_iterator_.reset();
inputs_.clear();
int64_t input_exhausted;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
if (!static_cast<bool>(input_exhausted)) {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kElementIndex, &temp));
element_index_ = temp;
}
int64_t current_element_iterator_uninitialized;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized,
¤t_element_iterator_uninitialized));
if (!static_cast<bool>(current_element_iterator_uninitialized)) {
TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader));
}
}
return absl::OkStatus();
}
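    // Restores iterator state when global shuffling is enabled by replaying
    // the index mapper to recover per-input element counts and positions
    // before restoring each input dataset's iterator.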
Status RestoreForGlobalShuffle(IteratorContext* ctx,
IteratorStateReader* reader)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
element_count_ = *ctx->restored_element_count();
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
}
input_element_counts_.resize(dataset_iterators_.size(), 0);
next_positions_.resize(dataset_iterators_.size(), 0);
std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0);
std::fill(next_positions_.begin(), next_positions_.end(), 0);
for (size_t count = 0; count < element_count_ && count < cardinality;
++count) {
TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count));
absl::StatusOr<size_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
break;
}
TF_RETURN_IF_ERROR(dataset_index.status());
++input_element_counts_[*dataset_index];
next_positions_[*dataset_index] = count + 1;
}
for (size_t i = 0; i < dataset_iterators_.size(); ++i) {
IteratorContext::Params params(ctx);
params.restored_element_count = input_element_counts_[i];
IteratorContext ctx_copy(std::move(params));
TF_RETURN_IF_ERROR(
RestoreInput(&ctx_copy, reader, dataset_iterators_[i]));
ctx->MergeCheckpoint(ctx_copy.checkpoint());
}
return absl::OkStatus();
}
private:
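    // Creates the iterator over the dataset produced by applying the captured
    // function to the most recently fetched input element.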
Status BuildCurrentElementIteratorLocked(IteratorContext* ctx,
bool is_get_next)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<model::Node> node = is_get_next ? model_node() : nullptr;
return MakeIteratorFromInputElement(
ctx, this, inputs_, element_index_++, *instantiated_captured_func_,
prefix(), ¤t_element_iterator_, node);
}
Status RestoreCurrentElementIterator(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (ctx->symbolic_checkpoint()) {
return RestoreCurrentElementIteratorSymbolic(ctx, reader);
}
size_t inputs_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp));
inputs_size = static_cast<size_t>(temp);
}
inputs_.reserve(inputs_size);
for (int i = 0; i < inputs_size; i++) {
inputs_.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix(), strings::StrCat(kInputs, "[", i, "]"),
&inputs_.back()));
}
element_index_--;
TF_RETURN_IF_ERROR(
          BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
bool end_of_sequence;
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence));
if (end_of_sequence) {
return absl::FailedPreconditionError(
"Unexpected end of sequence while symbolically restoring "
"FlatMapDataset. Please verify that the input produces data "
"deterministically.");
}
input_ckpt_->Merge(input_ctx->checkpoint());
element_index_--;
TF_RETURN_IF_ERROR(
          BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
mutex mu_;
size_t element_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
std::vector<Tensor> inputs_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
size_t element_count_ TF_GUARDED_BY(mu_) = 0;
std::vector<int64_t> input_element_counts_ TF_GUARDED_BY(mu_);
std::vector<size_t> next_positions_;
std::vector<std::unique_ptr<IteratorBase>> dataset_iterators_
TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> current_element_iterator_ TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
mutable FlatMapRandomAccessHandler random_access_handler_;
};
FlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, /*params=*/{},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), output_types_,
output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU),
FlatMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FlatMapDataset");
}
}
} | #include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "flat_map_dataset";
class FlatMapDatasetParams : public DatasetParams {
public:
template <typename T>
FlatMapDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FlatMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FlatMapDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return FlatMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FlatMapDatasetOpTest : public DatasetOpsTestBase {};
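// Params whose input is a 3x3x1 tensor-slice dataset and whose map function
// wraps each 3x1 slice in a nested TensorSliceDataset, so iteration yields the
// scalars 0..8 as shape-[1] tensors.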
FlatMapDatasetParams FlatMapDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{"Toutput_types", DataTypeVector({DT_INT64})},
{"output_shapes",
std::vector<PartialTensorShape>({PartialTensorShape({1})})}});
return FlatMapDatasetParams(
std::move(tensor_slice_dataset_params),
      /*other_arguments=*/{},
      func,
      /*func_lib=*/{test::function::MakeTensorSliceDataset()},
      /*type_arguments=*/{},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
kNodeName);
}
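// Params whose map function (NonZero) does not produce a dataset, so iterating
// the flat_map dataset is expected to fail with an InvalidArgument error.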
FlatMapDatasetParams InvalidFlatMapDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef( "NonZero",
{{"T", DT_INT64}});
return FlatMapDatasetParams(std::move(tensor_slice_dataset_params),
                              /*other_arguments=*/{},
                              func,
                              /*func_lib=*/{test::function::NonZero()},
                              /*type_arguments=*/{},
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<FlatMapDatasetParams>> GetNextTestCases() {
return {
{FlatMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<FlatMapDatasetParams>> SkipTestCases() {
  return {{FlatMapDatasetParams1(),
           /*num_to_skip=*/2, /*expected_num_skipped=*/2, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{2}})},
          {FlatMapDatasetParams1(),
           /*num_to_skip=*/4, /*expected_num_skipped=*/4, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{4}})},
          {FlatMapDatasetParams1(),
           /*num_to_skip=*/9, /*expected_num_skipped=*/9, /*get_next=*/false},
          {FlatMapDatasetParams1(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9, /*get_next=*/false}};
}
ITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
SkipTestCases())
TEST_F(FlatMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FlatMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FlatMapDatasetOp::kDatasetType)));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, Cardinality) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FlatMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{FlatMapDatasetParams1(),
       /*breakpoints=*/{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(FlatMapDatasetOpTest, InvalidMapFunc) {
auto dataset_params = InvalidFlatMapDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e803399-b51e-4508-8b73-9cd75c8bc46e | cpp | tensorflow/tensorflow | fixed_length_record_dataset_op | tensorflow/core/kernels/data/fixed_length_record_dataset_op.cc | tensorflow/core/kernels/data/fixed_length_record_dataset_op_test.cc | #include "tensorflow/core/kernels/data/fixed_length_record_dataset_op.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
namespace data {
constexpr const char* const
FixedLengthRecordDatasetOp::kDatasetType;
constexpr const char* const FixedLengthRecordDatasetOp::kFileNames;
constexpr const char* const
FixedLengthRecordDatasetOp::kHeaderBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kRecordBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kFooterBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kBufferSize;
constexpr const char* const
FixedLengthRecordDatasetOp::kCompressionType;
constexpr char kFixedLengthRecordDataset[] = "FixedLengthRecordDataset";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kCurrentPos[] = "current_pos";
constexpr char kZLIB[] = "ZLIB";
constexpr char kGZIP[] = "GZIP";
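// A Dataset that emits fixed-size byte records read from a list of files,
// skipping a per-file header and footer and optionally decompressing
// ZLIB- or GZIP-compressed input.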
class FixedLengthRecordDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<string> filenames,
int64_t header_bytes, int64_t record_bytes,
int64_t footer_bytes, int64_t buffer_size,
const string& compression_type, int op_version)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
header_bytes_(header_bytes),
record_bytes_(record_bytes),
footer_bytes_(footer_bytes),
buffer_size_(buffer_size),
compression_type_(compression_type),
op_version_(op_version) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
if (compression_type_.empty()) {
return std::make_unique<UncompressedIterator>(
UncompressedIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
} else {
return std::make_unique<CompressedIterator>(CompressedIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
Node* header_bytes = nullptr;
Node* record_bytes = nullptr;
Node* footer_bytes = nullptr;
Node* buffer_size = nullptr;
Node* compression_type = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
TF_RETURN_IF_ERROR(b->AddScalar(header_bytes_, &header_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(record_bytes_, &record_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(footer_bytes_, &footer_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{filenames, header_bytes, record_bytes, footer_bytes,
buffer_size, compression_type},
output));
return absl::OkStatus();
}
private:
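  // Iterator used when no compression type is set: it reads records directly
  // through an io::InputBuffer and validates each file's body length up front.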
class UncompressedIterator : public DatasetIterator<Dataset> {
public:
explicit UncompressedIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (input_buffer_) {
const int64_t current_pos = input_buffer_->Tell();
DCHECK_GE(file_pos_limit_, 0);
if (current_pos < file_pos_limit_) {
string record;
TF_RETURN_IF_ERROR(
input_buffer_->ReadNBytes(dataset()->record_bytes_, &record));
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
bytes_counter->IncrementBy(dataset()->record_bytes_);
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = record;
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
input_buffer_.reset();
file_.reset();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
uint64 file_size;
const std::string& next_filename =
dataset()->filenames_[current_file_index_];
TF_RETURN_IF_ERROR(ctx->env()->GetFileSize(next_filename, &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
uint64 body_size =
file_size - (dataset()->header_bytes_ + dataset()->footer_bytes_);
if (body_size % dataset()->record_bytes_ != 0) {
return errors::InvalidArgument(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"", next_filename, "\" has body length ",
body_size,
" bytes, which is not an exact multiple of the record length (",
dataset()->record_bytes_, " bytes).");
}
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(next_filename), &file_));
input_buffer_ = std::make_unique<io::InputBuffer>(
file_.get(), dataset()->buffer_size_);
TF_RETURN_IF_ERROR(input_buffer_->SkipNBytes(dataset()->header_bytes_));
} while (true);
}
protected:
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
int64_t current_pos = input_buffer_ ? input_buffer_->Tell() : -1;
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurrentPos, current_pos));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, ¤t_file_index));
current_file_index_ = size_t(current_file_index);
int64_t current_pos;
TF_RETURN_IF_ERROR(
        reader->ReadScalar(prefix(), kCurrentPos, &current_pos));
input_buffer_.reset();
file_.reset();
if (current_pos >= 0) {
uint64 file_size;
const std::string& current_filename =
dataset()->filenames_[current_file_index_];
TF_RETURN_IF_ERROR(
ctx->env()->GetFileSize(current_filename, &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(current_filename), &file_));
input_buffer_ = std::make_unique<io::InputBuffer>(
file_.get(), dataset()->buffer_size_);
TF_RETURN_IF_ERROR(input_buffer_->Seek(current_pos));
}
return absl::OkStatus();
}
private:
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
    std::unique_ptr<RandomAccessFile> file_
        TF_GUARDED_BY(mu_);  // Must outlive `input_buffer_`, which wraps it.
std::unique_ptr<io::InputBuffer> input_buffer_ TF_GUARDED_BY(mu_);
int64_t file_pos_limit_ TF_GUARDED_BY(mu_) = -1;
};
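  // Iterator variant that reads through a buffered (possibly ZLIB/GZIP
  // compressed) stream. For compressed input the uncompressed length is not
  // known up front, so a `footer_bytes_`-sized lookahead cache keeps the
  // reader one footer ahead and prevents footer bytes from being emitted as
  // records.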
class CompressedIterator : public DatasetIterator<Dataset> {
public:
explicit CompressedIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
mutex_lock l(mu_);
do {
if (buffered_input_stream_) {
const int64_t current_pos = buffered_input_stream_->Tell();
if (dataset()->compression_type_.empty()) {
DCHECK_GE(file_pos_limit_, 0);
if (current_pos < file_pos_limit_) {
tstring record;
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->record_bytes_, &record));
bytes_counter->IncrementBy(dataset()->record_bytes_);
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
} else {
tstring record;
Status s = buffered_input_stream_->ReadNBytes(
dataset()->record_bytes_, &record);
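            // On a successful read, append the fresh bytes to the lookahead
            // cache and emit the oldest `record_bytes_` bytes. The cache thus
            // always holds `footer_bytes_` bytes of lookahead, so footer bytes
            // are never returned as record data.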
if (s.ok()) {
bytes_counter->IncrementBy(dataset()->record_bytes_);
lookahead_cache_.append(record);
StringPiece lookahead_cache_view(lookahead_cache_);
record = tstring(
lookahead_cache_view.substr(0, dataset()->record_bytes_));
lookahead_cache_ = tstring(
lookahead_cache_view.substr(dataset()->record_bytes_));
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(s) && !record.empty()) {
uint64 body_size =
current_pos + record.size() -
(dataset()->header_bytes_ + dataset()->footer_bytes_);
return errors::DataLoss(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"",
dataset()->filenames_[current_file_index_],
"\" has body length ", body_size,
" bytes, which is not an exact multiple of the record "
"length (",
dataset()->record_bytes_, " bytes).");
}
}
buffered_input_stream_.reset();
file_.reset();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (dataset()->compression_type_.empty()) {
uint64 file_size;
TF_RETURN_IF_ERROR(ctx->env()->GetFileSize(
dataset()->filenames_[current_file_index_], &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
uint64 body_size =
file_size - (dataset()->header_bytes_ + dataset()->footer_bytes_);
if (body_size % dataset()->record_bytes_ != 0) {
return errors::InvalidArgument(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"",
dataset()->filenames_[current_file_index_],
"\" has body length ", body_size,
" bytes, which is not an exact multiple of the record length "
"(",
dataset()->record_bytes_, " bytes).");
}
}
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
if (!dataset()->compression_type_.empty()) {
const io::ZlibCompressionOptions zlib_options =
dataset()->compression_type_ == kZLIB
? io::ZlibCompressionOptions::DEFAULT()
: io::ZlibCompressionOptions::GZIP();
file_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get());
buffered_input_stream_ = std::make_unique<io::ZlibInputStream>(
file_stream_.get(), dataset()->buffer_size_,
dataset()->buffer_size_, zlib_options);
} else {
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
file_.get(), dataset()->buffer_size_);
}
TF_RETURN_IF_ERROR(
buffered_input_stream_->SkipNBytes(dataset()->header_bytes_));
lookahead_cache_.clear();
if (!dataset()->compression_type_.empty()) {
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->footer_bytes_, &lookahead_cache_));
}
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
int64_t current_pos =
buffered_input_stream_ ? buffered_input_stream_->Tell() : -1;
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurrentPos, current_pos));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t current_file_index;
TF_RETURN_IF_ERROR(
        reader->ReadScalar(prefix(), kCurrentFileIndex, &current_file_index));
current_file_index_ = size_t(current_file_index);
int64_t current_pos;
TF_RETURN_IF_ERROR(
        reader->ReadScalar(prefix(), kCurrentPos, &current_pos));
buffered_input_stream_.reset();
file_.reset();
if (current_pos >= 0) {
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
const io::ZlibCompressionOptions zlib_options =
dataset()->compression_type_ == kZLIB
? io::ZlibCompressionOptions::DEFAULT()
: io::ZlibCompressionOptions::GZIP();
file_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get());
buffered_input_stream_ = std::make_unique<io::ZlibInputStream>(
file_stream_.get(), dataset()->buffer_size_,
dataset()->buffer_size_, zlib_options);
lookahead_cache_.clear();
TF_RETURN_IF_ERROR(buffered_input_stream_->SkipNBytes(
current_pos - dataset()->footer_bytes_));
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->footer_bytes_, &lookahead_cache_));
}
return absl::OkStatus();
}
private:
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
    std::unique_ptr<RandomAccessFile> file_
        TF_GUARDED_BY(mu_);  // Must outlive `file_stream_` and
                             // `buffered_input_stream_`, which wrap it.
    std::unique_ptr<io::RandomAccessInputStream>
        file_stream_;  // Must outlive `buffered_input_stream_` when
                       // compression is in use.
std::unique_ptr<io::InputStreamInterface> buffered_input_stream_
TF_GUARDED_BY(mu_);
int64_t file_pos_limit_ TF_GUARDED_BY(mu_) = -1;
tstring lookahead_cache_ TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const int64_t header_bytes_;
const int64_t record_bytes_;
const int64_t footer_bytes_;
const int64_t buffer_size_;
const tstring compression_type_;
const int op_version_;
};
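// The V2 op (FixedLengthRecordDatasetV2) adds the `compression_type` input;
// op_version_ distinguishes it from the original FixedLengthRecordDataset.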
FixedLengthRecordDatasetOp::FixedLengthRecordDatasetOp(
OpKernelConstruction* ctx)
: DatasetOpKernel(ctx),
op_version_(ctx->def().op() == kFixedLengthRecordDataset ? 1 : 2) {}
void FixedLengthRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<tstring>()(i));
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
int64_t header_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kHeaderBytes, &header_bytes));
OP_REQUIRES(ctx, header_bytes >= 0,
errors::InvalidArgument("`header_bytes` must be >= 0"));
int64_t record_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kRecordBytes, &record_bytes));
OP_REQUIRES(ctx, record_bytes > 0,
errors::InvalidArgument("`record_bytes` must be > 0"));
int64_t footer_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kFooterBytes, &footer_bytes));
OP_REQUIRES(ctx, footer_bytes >= 0,
errors::InvalidArgument("`footer_bytes` must be >= 0"));
int64_t buffer_size = -1;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx, buffer_size >= 0,
errors::InvalidArgument("`buffer_size` must be >= 0"));
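  // A `buffer_size` of 0 selects the default buffer size of 256 KiB.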
if (buffer_size == 0) {
buffer_size = 256 << 10;
}
tstring compression_type;
if (op_version_ > 1) {
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
OP_REQUIRES(ctx,
compression_type.empty() || compression_type == kZLIB ||
compression_type == kGZIP,
errors::InvalidArgument("Unsupported compression_type."));
}
*output =
new Dataset(ctx, std::move(filenames), header_bytes, record_bytes,
footer_bytes, buffer_size, compression_type, op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FixedLengthRecordDataset").Device(DEVICE_CPU),
FixedLengthRecordDatasetOp);
REGISTER_KERNEL_BUILDER(Name("FixedLengthRecordDatasetV2").Device(DEVICE_CPU),
FixedLengthRecordDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/fixed_length_record_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "fixed_length_record_dataset";
constexpr int kOpVersion = 2;
tstring LocalTempFilename() {
std::string path;
CHECK(Env::Default()->LocalTempFilename(&path));
return tstring(path);
}
class FixedLengthRecordDatasetParams : public DatasetParams {
public:
FixedLengthRecordDatasetParams(const std::vector<tstring>& filenames,
int64_t header_bytes, int64_t record_bytes,
int64_t footer_bytes, int64_t buffer_size,
CompressionType compression_type,
string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(filenames),
header_bytes_(header_bytes),
record_bytes_(record_bytes),
footer_bytes_(footer_bytes),
buffer_size_(buffer_size),
compression_type_(compression_type) {
op_version_ = 2;
}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<int64_t>(TensorShape({}), {header_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {record_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {footer_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {FixedLengthRecordDatasetOp::kFileNames,
FixedLengthRecordDatasetOp::kHeaderBytes,
FixedLengthRecordDatasetOp::kRecordBytes,
FixedLengthRecordDatasetOp::kFooterBytes,
FixedLengthRecordDatasetOp::kBufferSize,
FixedLengthRecordDatasetOp::kCompressionType};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return FixedLengthRecordDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
int64_t header_bytes_;
int64_t record_bytes_;
int64_t footer_bytes_;
int64_t buffer_size_;
CompressionType compression_type_;
};
class FixedLengthRecordDatasetOpTest : public DatasetOpsTestBase {};
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<string>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
if (compression_type == CompressionType::UNCOMPRESSED) {
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(WriteDataToFile(filenames[i], contents[i].data()));
}
} else {
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(
WriteDataToFile(filenames[i], contents[i].data(), params));
}
}
return absl::OkStatus();
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams1() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
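  // Each test file has a 5-byte header ("HHHHH"), 3-byte records, and a
  // 2-byte footer ("FF"), matching the header/record/footer sizes passed to
  // FixedLengthRecordDatasetParams below.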
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams2() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams3() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
std::vector<GetNextTestCase<FixedLengthRecordDatasetParams>>
GetNextTestCases() {
return {
{FixedLengthRecordDatasetParams1(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams2(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams3(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})}};
}
ITERATOR_GET_NEXT_TEST_P(FixedLengthRecordDatasetOpTest,
FixedLengthRecordDatasetParams, GetNextTestCases())
TEST_F(FixedLengthRecordDatasetOpTest, DatasetNodeName) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetTypeString) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = kOpVersion;
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FixedLengthRecordDatasetOp::kDatasetType, params)));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(FixedLengthRecordDatasetOpTest, Cardinality) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorPrefix) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.op_version = kOpVersion;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FixedLengthRecordDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), iterator_prefix_params)));
}
std::vector<IteratorSaveAndRestoreTestCase<FixedLengthRecordDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{FixedLengthRecordDatasetParams1(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams2(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams3(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FixedLengthRecordDatasetOpTest,
FixedLengthRecordDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/fixed_length_record_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/fixed_length_record_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
20dd759b-3844-48bd-aad9-73b89823041d | cpp | tensorflow/tensorflow | finalize_dataset_op | tensorflow/core/kernels/data/finalize_dataset_op.cc | tensorflow/core/kernels/data/finalize_dataset_op_test.cc | #include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/experimental/threadpool_dataset_op.h"
#include "tensorflow/core/kernels/data/model_dataset_op.h"
#include "tensorflow/core/kernels/data/optimize_dataset_op.h"
namespace tensorflow {
namespace data {
constexpr const char* const FinalizeDatasetOp::kDatasetType;
constexpr const char* const FinalizeDatasetOp::kInputDataset;
constexpr const char* const FinalizeDatasetOp::kOutputTypes;
constexpr const char* const FinalizeDatasetOp::kOutputShapes;
constexpr const char* const FinalizeDatasetOp::kHasCapturedRef;
namespace {
void GetModelDatasetParams(const Options& options,
model::AutotuneAlgorithm* algorithm,
int64_t* cpu_budget, int64_t* ram_budget) {
*algorithm = model::AutotuneAlgorithm::HILL_CLIMB;
*cpu_budget = options.autotune_options().cpu_budget();
*ram_budget = options.autotune_options().ram_budget();
}
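// Wraps `input` with the datasets implied by its options: max intra-op
// parallelism, private thread pool, autotuning model, and graph-rewrite
// optimizations. Each wrapper takes over the reference to the previous stage,
// which is unreffed once consumed.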
void MakeDatasetHelper(OpKernelContext* ctx, bool has_captured_ref,
DatasetBase* input, DatasetBase** output) {
*output = input;
input->Ref();
const Options& options = input->options();
if (ShouldConfigureMaxIntraOpParallelism(options)) {
experimental::MaxIntraOpParallelismDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().max_intra_op_parallelism(),
output);
input->Unref();
input = *output;
}
if (ShouldUsePrivateThreadPool(options)) {
experimental::PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().private_threadpool_size(),
output);
input->Unref();
input = *output;
}
if (ShouldUseAutotuning(options)) {
model::AutotuneAlgorithm algorithm;
int64_t cpu_budget;
int64_t ram_budget;
GetModelDatasetParams(options, &algorithm, &cpu_budget, &ram_budget);
ModelDatasetOp::MakeDatasetFromOptions(ctx, input, algorithm, cpu_budget,
ram_budget, output);
input->Unref();
input = *output;
}
absl::flat_hash_set<tstring> optimizations_enabled;
absl::flat_hash_set<tstring> optimizations_disabled;
absl::flat_hash_set<tstring> optimizations_default;
GetOptimizations(options, &optimizations_enabled, &optimizations_disabled,
&optimizations_default);
if (ShouldApplyOptimizations(options, optimizations_enabled,
optimizations_default)) {
if (has_captured_ref &&
(!optimizations_enabled.empty() || !optimizations_default.empty())) {
LOG(WARNING)
<< "tf.data graph rewrites are not compatible with reference "
"variables. The following rewrites will be disabled: "
<< absl::StrJoin(optimizations_enabled, ", ") << ", "
<< absl::StrJoin(optimizations_default, ", ") << ". "
<< "To enable rewrites, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program.";
} else {
auto optimization_configs = CreateGraphRewriteConfigs(options);
OptimizeDatasetOp::MakeDatasetFromOptions(
ctx, input, optimizations_enabled, optimizations_disabled,
optimizations_default, optimization_configs, output);
input->Unref();
input = *output;
}
}
}
}
FinalizeDatasetOp::FinalizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kHasCapturedRef)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kHasCapturedRef, &has_captured_ref_));
} else {
has_captured_ref_ = false;
}
}
void FinalizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
MakeDatasetHelper(ctx, has_captured_ref_, input, output);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset").Device(DEVICE_CPU).Priority(2),
FinalizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
FinalizeDatasetNoopOp);
}
}
} | #include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
class FinalizeDatasetParams : public DatasetParams {
public:
template <typename T>
FinalizeDatasetParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
has_captured_ref_(false) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FinalizeDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{FinalizeDatasetOp::kHasCapturedRef, has_captured_ref_},
{FinalizeDatasetOp::kOutputTypes, output_dtypes_},
{FinalizeDatasetOp::kOutputShapes, output_shapes_}};
return absl::OkStatus();
}
string dataset_type() const override { return "Finalize"; }
private:
bool has_captured_ref_;
};
class FinalizeDatasetOpTest : public DatasetOpsTestBase {
public:
void CheckDatasetPipelineTypeStrings(
const std::vector<std::string>& type_strings) {
CheckDatasetPipelineTypeString(dataset_, type_strings, 0);
}
void CheckDatasetPipelineTypeString(
const DatasetBase* dataset, const std::vector<std::string>& type_strings,
int index) {
EXPECT_GT(type_strings.size(), index);
EXPECT_EQ(dataset->type_string(), type_strings[index]);
std::vector<const DatasetBase*> input_datasets;
TF_ASSERT_OK(dataset->InputDatasets(&input_datasets));
if (input_datasets.empty()) {
return;
}
EXPECT_EQ(1, input_datasets.size());
CheckDatasetPipelineTypeString(input_datasets[0], type_strings, index + 1);
}
};
constexpr char kNoOptimizationOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kMaxIntraOpParallelismOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { max_intra_op_parallelism: 10 }
)pb";
constexpr char kPrivateThreadPoolOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { private_threadpool_size: 10 }
)pb";
constexpr char kModelOptions[] = R"pb(
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kOptimizationsDefaultOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: true }
)pb";
constexpr char kAllChainedDatasetsOptions[] = R"pb(
autotune_options { enabled: true }
optimization_options { apply_default_optimizations: true }
threading_options { max_intra_op_parallelism: 10 private_threadpool_size: 10 }
)pb";
OptionsDatasetParams NoOptimizationOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kNoOptimizationOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams MaxIntraOpParallelismOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kMaxIntraOpParallelismOptions,
&options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams PrivateThreadPoolOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kPrivateThreadPoolOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams ModelOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kModelOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptimizationsDefaultOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kOptimizationsDefaultOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams AllChainedDatasetsOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kAllChainedDatasetsOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams NoOptimizationFinalizeParams() {
return FinalizeDatasetParams(NoOptimizationOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams MaxIntraOpParallelismParams() {
return FinalizeDatasetParams(MaxIntraOpParallelismOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"MaxIntraOpParallelismDatasetOp");
}
FinalizeDatasetParams PrivateThreadPoolParams() {
return FinalizeDatasetParams(PrivateThreadPoolOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"PrivateThreadPoolDatasetOp");
}
FinalizeDatasetParams ModelParams() {
return FinalizeDatasetParams(ModelOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"ModelDatasetOp");
}
FinalizeDatasetParams OptimizationsDefaultParams() {
return FinalizeDatasetParams(OptimizationsDefaultOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"private_thread_pool");
}
FinalizeDatasetParams AllChainedDatasetsParams() {
return FinalizeDatasetParams(AllChainedDatasetsOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"inject/prefetch_ModelDataset/_9");
}
TEST_F(FinalizeDatasetOpTest, NoOptimizationNodeName) {
auto test_case_params = NoOptimizationFinalizeParams();
TF_ASSERT_OK(Initialize(test_case_params));
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"OptionsDataset", "RangeDataset"});
}
std::vector<GetNextTestCase<FinalizeDatasetParams>> GetNextTestCases() {
return {{NoOptimizationFinalizeParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{MaxIntraOpParallelismParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{PrivateThreadPoolParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{ModelParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptimizationsDefaultParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{AllChainedDatasetsParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(FinalizeDatasetOpTest, FinalizeDatasetParams,
GetNextTestCases())
TEST_F(FinalizeDatasetOpTest, MaxIntraOpParallelismNodeName) {
auto test_case_params = MaxIntraOpParallelismParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, PrivateThreadPoolNodeName) {
auto test_case_params = PrivateThreadPoolParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrivateThreadPoolDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, ModelNodeName) {
auto test_case_params = ModelParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"ModelDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, OptimizationsDefaultNodeName) {
auto test_case_params = OptimizationsDefaultParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset",
"OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, AllChainedDatasetsNodeName) {
auto test_case_params = AllChainedDatasetsParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrefetchDataset", "ModelDataset", "PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/finalize_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/finalize_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04b44e2d-8870-434b-af96-49d0038076f2 | cpp | tensorflow/tensorflow | range_dataset_op | tensorflow/core/kernels/data/range_dataset_op.cc | tensorflow/core/kernels/data/range_dataset_op_test.cc | #include "tensorflow/core/kernels/data/range_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace data {
constexpr const char* const RangeDatasetOp::kDatasetType;
constexpr const char* const RangeDatasetOp::kStart;
constexpr const char* const RangeDatasetOp::kStop;
constexpr const char* const RangeDatasetOp::kStep;
constexpr const char* const RangeDatasetOp::kOutputTypes;
constexpr const char* const RangeDatasetOp::kOutputShapes;
constexpr const char* const RangeDatasetOp::kReplicateOnSplit;
namespace {
constexpr char kNext[] = "next";
constexpr char kHasSplitProvider[] = "has_split_provider";
constexpr char kSlash[] = "/";
constexpr char kSplitProvider[] = "split_provider";
Status ConvertOutputTypes(const tensorflow::DataTypeVector& output_dtypes,
std::vector<Tensor>* out_tensors, int64 value) {
switch (output_dtypes[0]) {
#define HANDLE_TYPE(type) \
case DataTypeToEnum<type>::value: { \
out_tensors->emplace_back(static_cast<type>(value)); \
break; \
}
TF_CALL_NUMBER_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::InvalidArgument("Unsupported data type: ",
DataTypeString(output_dtypes[0]));
}
return absl::OkStatus();
}
int64_t sgn(int64_t val) { return (0 < val) - (val < 0); }
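// Number of elements produced by range(start, stop, step). A `stop` of
// kint64max (or beyond) is treated as an infinite range.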
int64_t RangeCardinality(int64_t start, int64_t stop, int64_t step) {
if (stop >= tsl::kint64max) {
return kInfiniteCardinality;
}
if (sgn(stop - start) * sgn(step) <= 0) {
return 0;
} else if (step > 0) {
return (stop - start - 1) / step + 1;
} else {
return (start - stop - 1) / -step + 1;
}
}
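// Thread-safe counter over the values of the range. It is used directly by
// the iterator when no split provider is in play (or splits are replicated),
// and indirectly through RangeSplitProvider otherwise.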
class RangeCounter {
public:
RangeCounter(int64_t start, int64_t stop, int64_t step)
: start_(start), stop_(stop), step_(step), next_(start) {}
int64_t GetNext(bool* end_of_counter) {
mutex_lock l(mu_);
if ((step_ > 0 && next_ >= stop_) || (step_ < 0 && next_ <= stop_)) {
*end_of_counter = true;
return -1;
}
*end_of_counter = false;
int64_t result = next_;
next_ += step_;
return result;
}
int64_t Peek() const {
mutex_lock l(mu_);
return next_;
}
void Reset() {
mutex_lock l(mu_);
next_ = start_;
}
void SetNext(int64_t value) {
mutex_lock l(mu_);
next_ = value;
}
int64_t Cardinality() const { return RangeCardinality(start_, stop_, step_); }
private:
const int64_t start_;
const int64_t stop_;
const int64_t step_;
mutable mutex mu_;
int64_t next_ TF_GUARDED_BY(mu_);
};
}
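// Hands out each value of the range as an individual split, so the range can
// be partitioned across consumers (e.g., tf.data service workers).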
class RangeDatasetOp::RangeSplitProvider : public SplitProvider {
public:
RangeSplitProvider(int64_t start, int64_t stop, int64_t step)
: counter_(start, stop, step) {}
Status GetNext(Tensor* split, bool* end_of_splits) override {
int64_t next = counter_.GetNext(end_of_splits);
if (*end_of_splits) {
return absl::OkStatus();
}
*split = Tensor(DT_INT64, TensorShape{});
split->scalar<int64_t>()() = next;
return absl::OkStatus();
}
Status Reset() override {
counter_.Reset();
return absl::OkStatus();
}
Status Save(std::function<std::string(std::string)> key_name_fn,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_name_fn(kNext), counter_.Peek()));
return absl::OkStatus();
}
Status Restore(std::function<std::string(std::string)> key_name_fn,
IteratorStateReader* reader) override {
int64_t next;
TF_RETURN_IF_ERROR(reader->ReadScalar(key_name_fn(kNext), &next));
counter_.SetNext(next);
return absl::OkStatus();
}
int64_t Cardinality() const override { return counter_.Cardinality(); }
private:
RangeCounter counter_;
};
class RangeDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t start, int64_t stop, int64_t step,
DataTypeVector output_dtypes, bool replicate_on_split)
: DatasetBase(DatasetContext(ctx)),
start_(start),
stop_(stop),
step_(step),
output_dtypes_(output_dtypes),
replicate_on_split_(replicate_on_split) {}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({PartialTensorShape({})});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(start_, stop_, step_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return RangeCardinality(start_, stop_, step_);
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<RangeSplitProvider>(start_, stop_, step_));
return absl::OkStatus();
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->clear();
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return ConvertOutputTypes(output_dtypes(), out_tensors,
start_ + (index * step_));
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* start = nullptr;
Node* stop = nullptr;
Node* step = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(start_, &start));
TF_RETURN_IF_ERROR(b->AddScalar(stop_, &stop));
TF_RETURN_IF_ERROR(b->AddScalar(step_, &step));
AttrValue replicate_on_split;
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {start, stop, step},
{std::make_pair(kReplicateOnSplit, replicate_on_split)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
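    // With no split provider (or when splits are replicated), the iterator
    // walks the range itself via a RangeCounter; otherwise it consumes values
    // from the single split provider.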
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {
counter_ = std::make_unique<RangeCounter>(
dataset()->start_, dataset()->stop_, dataset()->step_);
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
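      // An index mapper is present when an upstream transformation performs a
      // global shuffle; element selection is then delegated to the
      // global-shuffle-aware iterator.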
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
int64_t value;
if (split_provider_ != nullptr) {
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
value = split.scalar<int64_t>()();
} else {
value = counter_->GetNext(end_of_sequence);
if (*end_of_sequence) {
return absl::OkStatus();
}
}
out_tensors->reserve(1);
return ConvertOutputTypes(output_dtypes(), out_tensors, value);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (split_provider_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kHasSplitProvider, true));
TF_RETURN_IF_ERROR(split_provider_->Save(
[this](const std::string& key) {
return SplitProviderKeyNameFn(key);
},
writer));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNext, counter_->Peek()));
}
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
if (reader->Contains(prefix(), kHasSplitProvider)) {
TF_RETURN_IF_ERROR(split_provider_->Restore(
[this](const std::string& key) {
return SplitProviderKeyNameFn(key);
},
reader));
} else {
int64_t next;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNext, &next));
counter_->SetNext(next);
}
return absl::OkStatus();
}
std::string SplitProviderKeyNameFn(const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
}
private:
std::unique_ptr<RangeCounter> counter_;
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const int64_t start_;
const int64_t stop_;
const int64_t step_;
const DataTypeVector output_dtypes_;
const bool replicate_on_split_;
};
RangeDatasetOp::RangeDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
if (ctx->HasAttr(kReplicateOnSplit)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));
}
}
void RangeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
int64_t start;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStart, &start));
int64_t stop;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStop, &stop));
int64_t step;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStep, &step));
OP_REQUIRES(ctx, step != 0,
errors::InvalidArgument("step must be a non-zero integer."));
*output =
new Dataset(ctx, start, stop, step, output_types_, replicate_on_split_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RangeDataset").Device(DEVICE_CPU),
RangeDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
class RangeDatasetOpTest : public DatasetOpsTestBase {};
RangeDatasetParams PositiveStepRangeDatasetParams() {
return RangeDatasetParams(0, 10, 3);
}
RangeDatasetParams NegativeStepRangeDatasetParams() {
return RangeDatasetParams(10, 0, -3);
}
RangeDatasetParams Int32OverflowRangeDatasetParames() {
return RangeDatasetParams(2'147'483'647LL, 2'147'483'649LL,
1);
}
RangeDatasetParams UnsignedInt32OverflowRangeDatasetParames() {
return RangeDatasetParams(4'294'967'295LL, 4'294'967'297LL,
1);
}
RangeDatasetParams ZeroStepRangeDatasetParams() {
return RangeDatasetParams(10, 0, 0);
}
RangeDatasetParams RangeDatasetParams1() {
return RangeDatasetParams(0, 10, 3,
{DT_INT32});
}
RangeDatasetParams RangeDatasetParams2() {
return RangeDatasetParams(0, 10, 3,
{DT_INT64});
}
std::vector<GetNextTestCase<RangeDatasetParams>> GetNextTestCases() {
return {{PositiveStepRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{NegativeStepRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})},
{Int32OverflowRangeDatasetParames(),
CreateTensors<int64_t>(TensorShape({}),
{{2'147'483'647LL}, {2'147'483'648LL}})},
{UnsignedInt32OverflowRangeDatasetParames(),
CreateTensors<int64_t>(TensorShape({}),
{{4'294'967'295LL}, {4'294'967'296LL}})}};
}
ITERATOR_GET_NEXT_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
GetNextTestCases())
TEST_F(RangeDatasetOpTest, DatasetNodeName) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(range_dataset_params.node_name()));
}
TEST_F(RangeDatasetOpTest, DatasetTypeString) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(RangeDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<RangeDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{RangeDatasetParams1(),
{DT_INT32}},
{RangeDatasetParams2(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
DatasetOutputDtypesTestCases())
TEST_F(RangeDatasetOpTest, DatasetOutputShapes) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<RangeDatasetParams>> CardinalityTestCases() {
return {{PositiveStepRangeDatasetParams(),
4},
{NegativeStepRangeDatasetParams(),
4}};
}
DATASET_CARDINALITY_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<RangeDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{RangeDatasetParams1(),
{DT_INT32}},
{RangeDatasetParams2(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
IteratorOutputDtypesTestCases())
TEST_F(RangeDatasetOpTest, IteratorOutputShapes) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(RangeDatasetOpTest, IteratorPrefix) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
RangeDatasetOp::kDatasetType, range_dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<RangeDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PositiveStepRangeDatasetParams(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{NegativeStepRangeDatasetParams(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(RangeDatasetOpTest, ZeroStep) {
auto range_dataset_params = ZeroStepRangeDatasetParams();
EXPECT_EQ(Initialize(range_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(RangeDatasetOpTest, SplitProviderPositiveStep) {
auto params = RangeDatasetParams(0, 10, 3,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 2, 1,
CreateTensors<int64_t>(TensorShape({}), {{3}, {9}})));
}
TEST_F(RangeDatasetOpTest, SplitProviderNegativeStep) {
auto params = RangeDatasetParams(10, 0, -3,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 2, 0,
CreateTensors<int64_t>(TensorShape({}), {{10}, {4}})));
}
TEST_F(RangeDatasetOpTest, SplitProviderEmpty) {
auto params = RangeDatasetParams(0, 0, 1,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 2,
CreateTensors<int64_t>(TensorShape({}), {})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/range_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/range_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c26e3140-8b23-45b0-b07c-7702938d3de1 | cpp | tensorflow/tensorflow | rewrite_dataset_op | tensorflow/core/kernels/data/rewrite_dataset_op.cc | tensorflow/core/kernels/data/rewrite_dataset_op_test.cc | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include <string>
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const RewriteDatasetOp::kDatasetType;
constexpr const char* const RewriteDatasetOp::kInputDataset;
constexpr const char* const RewriteDatasetOp::kRewriteName;
constexpr const char* const RewriteDatasetOp::kOutputTypes;
constexpr const char* const RewriteDatasetOp::kOutputShapes;
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
tstring rewrite_name;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name));
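  // Build a RewriterConfig that runs only the requested rewrite, for a single
  // meta-optimizer iteration, and fails if the rewrite reports an error.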
auto config_factory = [rewrite_name]() {
RewriterConfig rewriter_config;
rewriter_config.add_optimizers(std::string(rewrite_name));
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.set_fail_on_optimizer_errors(true);
return rewriter_config;
};
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten));
*output = rewritten.release();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
input->Ref();
*output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "rewrite_dataset";
constexpr char kReplicateOnSplit[] = "replicate_on_split";
class RewriteDatasetParams : public DatasetParams {
public:
template <typename T>
RewriteDatasetParams(T input_dataset_params, string rewrite_name,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
rewrite_name_(rewrite_name) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<tstring>(TensorShape({}), {rewrite_name_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {RewriteDatasetOp::kInputDataset,
RewriteDatasetOp::kRewriteName};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return RewriteDatasetOp::kDatasetType;
}
private:
string rewrite_name_;
};
class RewriteDatasetOpTest : public DatasetOpsTestBase {};
TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) {
auto range_dataset_params = RangeDatasetParams(0, 5, 1);
auto rewrite_dataset_params =
RewriteDatasetParams(std::move(range_dataset_params),
kReplicateOnSplit,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
std::vector<Tensor> expected_outputs =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}});
TF_ASSERT_OK(Initialize(rewrite_dataset_params));
TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/rewrite_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/rewrite_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64d302c1-c19a-4cfc-83cc-4955f74832d2 | cpp | tensorflow/tensorflow | map_dataset_op | tensorflow/core/kernels/data/map_dataset_op.cc | tensorflow/core/kernels/data/map_dataset_op_test.cc | #include "tensorflow/core/kernels/data/map_dataset_op.h"
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace data {
constexpr const char* const MapDatasetOp::kDatasetType;
constexpr const char* const MapDatasetOp::kInputDataset;
constexpr const char* const MapDatasetOp::kOtherArguments;
constexpr const char* const MapDatasetOp::kFunc;
constexpr const char* const MapDatasetOp::kTarguments;
constexpr const char* const MapDatasetOp::kOutputTypes;
constexpr const char* const MapDatasetOp::kOutputShapes;
constexpr const char* const MapDatasetOp::kUseInterOpParallelism;
constexpr const char* const MapDatasetOp::kPreserveCardinality;
constexpr const char* const MapDatasetOp::kForceSynchronous;
class MapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
bool preserve_cardinality, bool force_synchronous)
: DatasetBase(DatasetContext(ctx)),
input_(input),
preserve_cardinality_(preserve_cardinality),
force_synchronous_(force_synchronous),
captured_func_(std::move(captured_func)),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (preserve_cardinality_) {
return input_->Cardinality(options);
} else {
return kUnknownCardinality;
}
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
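  // Random-access path: fetch element `index` from the input dataset and
  // apply the captured map function to it, instantiating the function lazily
  // on first use.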
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));
if (!instantiated_captured_func_) {
TF_RETURN_IF_ERROR(
captured_func_->Instantiate(InstantiateCapturedFunctionParams(ctx),
&instantiated_captured_func_));
}
return instantiated_captured_func_->RunInstantiated(args, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f_attr;
b->BuildAttrValue(captured_func_->func(), &f_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
AttrValue use_inter_op_parallelism_attr;
b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),
&use_inter_op_parallelism_attr);
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
AttrValue force_synchronous_attr;
b->BuildAttrValue(force_synchronous_, &force_synchronous_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {std::make_pair(0, input_graph_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f_attr),
std::make_pair(kTarguments, other_arguments_types_attr),
std::make_pair(kUseInterOpParallelism, use_inter_op_parallelism_attr),
std::make_pair(kPreserveCardinality, preserve_cardinality_attr),
std::make_pair(kForceSynchronous, force_synchronous_attr)},
output));
return absl::OkStatus();
}
private:
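  // Iterator that pulls each element from the input iterator and applies the
  // user-defined function to produce the output element.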
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &args, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
Status s = instantiated_captured_func_->Run(ctx, std::move(args),
out_tensors, model_node());
if (errors::IsOutOfRange(s)) {
if (dataset()->preserve_cardinality_) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ", s.message());
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
if (!s.ok()) {
return AddErrorContext(s);
}
return s;
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), 1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const bool preserve_cardinality_;
const bool force_synchronous_;
const std::unique_ptr<CapturedFunction> captured_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
mutable std::unique_ptr<InstantiatedCapturedFunction>
instantiated_captured_func_;
absl::Status random_indexing_compatible_;
};
MapDatasetOp::MapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseInterOpParallelism,
¶ms.use_inter_op_parallelism));
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, kFunc, params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kForceSynchronous, &force_synchronous_));
}
void MapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output =
new Dataset(ctx, input, std::move(captured_func), output_types_,
output_shapes_, preserve_cardinality_, force_synchronous_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapDataset").Device(DEVICE_CPU), MapDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalMapDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle"),
MapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("MapDataset");
}
}
} | #include "tensorflow/core/kernels/data/map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "map_dataset";
class MapDatasetOpTest : public DatasetOpsTestBase {};
MapDatasetParams MapDatasetParams1() {
auto map_dataset_params_0 = MapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
true,
"map_dataset_0");
return MapDatasetParams(
std::move(map_dataset_params_0),
{},
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
true,
"map_dataset_1");
}
MapDatasetParams MapDatasetParams2() {
auto batch_dataset_params =
BatchDatasetParams(RangeDatasetParams(10, 0, -3),
2,
false,
true,
{DT_INT64},
{PartialTensorShape({2})},
"batch_dataset");
return MapDatasetParams(
std::move(batch_dataset_params),
{},
FunctionDefHelper::FunctionRef("XAddX", {{"T", DT_INT64}}),
{test::function::XAddX()},
{},
{DT_INT64},
{PartialTensorShape({1})},
true,
false,
kNodeName);
}
MapDatasetParams MapDatasetParams3() {
return MapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
FunctionDefHelper::FunctionRef("XTimesFour", {{"T", DT_INT64}}),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
true,
kNodeName);
}
std::vector<GetNextTestCase<MapDatasetParams>> GetNextTestCases() {
return {{MapDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})},
{MapDatasetParams2(),
CreateTensors<int64_t>(TensorShape({2}), {{20, 14}, {8, 2}})},
{MapDatasetParams3(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})}};
}
ITERATOR_GET_NEXT_TEST_P(MapDatasetOpTest, MapDatasetParams, GetNextTestCases())
TEST_F(MapDatasetOpTest, DatasetNodeName) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(MapDatasetOpTest, DatasetTypeString) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(MapDatasetOp::kDatasetType)));
}
TEST_F(MapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(MapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<MapDatasetParams>> CardinalityTestCases() {
return {{MapDatasetParams1(),
4},
{MapDatasetParams2(),
kUnknownCardinality},
{MapDatasetParams3(),
4}};
}
DATASET_CARDINALITY_TEST_P(MapDatasetOpTest, MapDatasetParams,
CardinalityTestCases())
TEST_F(MapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(MapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(MapDatasetOpTest, IteratorPrefix) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
MapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<MapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{MapDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})},
{MapDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({2}), {{20, 14}, {8, 2}})},
{MapDatasetParams3(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(MapDatasetOpTest, MapDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb352e40-0206-4615-9ec1-0137e0066b13 | cpp | tensorflow/tensorflow | cache_dataset_ops | tensorflow/core/kernels/data/cache_dataset_ops.cc | tensorflow/core/kernels/data/cache_dataset_ops_test.cc | #include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/cache_ops.h"
#include "tensorflow/core/kernels/data/iterator_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace data {
constexpr const char* const CacheDatasetOp::kDatasetType;
constexpr const char* const CacheDatasetOp::kInputDataset;
constexpr const char* const CacheDatasetOp::kFileName;
constexpr const char* const CacheDatasetOp::kOutputTypes;
constexpr const char* const CacheDatasetOp::kOutputShapes;
namespace {
constexpr char kKeyStrFormat[] = "%%%zuzu_%%%zuzu";
constexpr char kPaddingSizeStrFormat[] = "%zu";
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMode[] = "Mode";
constexpr char kLockFileSuffix[] = ".lockfile";
constexpr char kIterationCompleted[] = "iteration_completed";
constexpr char kCurIndex[] = "cur_index";
constexpr char kShardId[] = "shard_id";
constexpr char kCreatedAt[] = "Created at";
constexpr char kMemoryDatasetPrefix[] = "Memory";
constexpr char kMemoryCache[] = "MemoryCache";
constexpr char kCacheCompleted[] = "cache_completed";
constexpr char kIndex[] = "index";
constexpr char kImpl[] = "Impl";
constexpr char kCacheDataset[] = "CacheDataset";
constexpr char kIncompleteCacheErrorMessage[] =
"The calling iterator did not fully read the dataset being cached. In "
"order to avoid unexpected truncation of the dataset, the partially cached "
"contents of the dataset will be discarded. This can happen if you have "
"an input pipeline similar to `dataset.cache().take(k).repeat()`. You "
"should use `dataset.take(k).cache().repeat()` instead.";
}
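// Caches dataset elements to support random access (Get) on datasets that only
// expose sequential iteration: elements are materialized from an internal
// iterator up to the requested index and kept in memory.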
class DatasetRandomAccessCache {
public:
explicit DatasetRandomAccessCache(const DatasetBase* dataset)
: input_(dataset) {}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) {
if (!iter_resource_) {
TF_ASSIGN_OR_RETURN(iter_resource_,
GetIteratorResourceFromDataset(ctx, input_));
TF_RETURN_IF_ERROR(iter_resource_->SetIteratorFromDataset(ctx, input_));
}
if (index >= cache_.size()) {
TF_RETURN_IF_ERROR(ExtendTempCacheToIndex(index, ctx));
}
*out_tensors = cache_.at(index);
return absl::OkStatus();
}
std::vector<std::vector<Tensor>> GetCacheData() { return cache_; }
private:
Status ExtendTempCacheToIndex(int64 index, OpKernelContext* ctx) {
bool end_of_sequence;
while (cache_.size() <= index) {
std::vector<Tensor> out_tensors;
TF_RETURN_IF_ERROR(
iter_resource_->GetNext(ctx, &out_tensors, &end_of_sequence));
if (end_of_sequence) {
return tensorflow::errors::OutOfRange("Index out of range [0, ",
cache_.size(), "):", index);
}
cache_.push_back(out_tensors);
}
return absl::OkStatus();
}
absl::StatusOr<core::RefCountPtr<IteratorResource>>
GetIteratorResourceFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset) {
FunctionLibraryRuntime* flr;
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> plfr(nullptr);
TF_RETURN_IF_ERROR(
ctx->function_library()->Clone(&flib_def, &plfr, &flr, true));
core::RefCountPtr<IteratorResource> iter_resource(new IteratorResource(
ctx->env(), dataset->output_dtypes(), dataset->output_shapes(),
std::move(device_mgr), std::move(flib_def), std::move(plfr), flr));
return iter_resource;
}
const DatasetBase* input_;
core::RefCountPtr<IteratorResource> iter_resource_;
std::vector<std::vector<Tensor>> cache_;
};
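// Memoizes the results of random-access Get() calls on the input dataset so
// that repeated lookups of the same element position are served from memory.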
class IteratorRandomAccessCache {
public:
explicit IteratorRandomAccessCache(const DatasetBase* input)
: input_(input) {}
absl::Status Get(AnyContext ctx, size_t element_position,
std::vector<Tensor>* out_tensors) {
if (element_position < cache_.size() && !cache_[element_position].empty()) {
*out_tensors = cache_[element_position];
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(input_->Get(ctx, element_position, out_tensors));
if (element_position >= cache_.size()) {
cache_.resize(element_position + 1);
}
cache_[element_position] = *out_tensors;
return absl::OkStatus();
}
private:
const DatasetBase* input_ = nullptr;
std::vector<std::vector<Tensor>> cache_;
};
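// Base class for the file-backed cache dataset: elements are written to and
// read back from a TensorBundle on disk, keyed by (element index, tensor index).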
class CacheDatasetOp::FileDatasetBase : public DatasetBase {
public:
FileDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
string filename, Env* env)
: DatasetBase(DatasetContext(ctx)),
input_(input),
filename_(std::move(filename)),
env_(env),
num_tensors_(input->output_dtypes().size()),
tensor_index_padding_size_(StringPaddingSize(num_tensors_)),
item_index_padding_size_(StringPaddingSize(kMaxItems)),
tensor_format_string_(strings::Printf(kKeyStrFormat,
item_index_padding_size_,
tensor_index_padding_size_)) {
input_->Ref();
DCHECK_EQ(item_index_padding_size_, 7);
}
~FileDatasetBase() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.dataset_prefix = kFileDatasetPrefix;
return std::make_unique<FileIterator>(FileIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.dataset_prefix = kFileDatasetPrefix;
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
const DatasetBase* const input_;
const tstring filename_;
private:
static size_t StringPaddingSize(size_t num_tensors) {
return strings::Printf(kPaddingSizeStrFormat, num_tensors - 1).size();
}
string FormatName(size_t item_index, size_t tensor_index) const {
return strings::Printf(tensor_format_string_.c_str(), item_index,
tensor_index);
}
class FileIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params) {
if (params.dataset->env_
->FileExists(MetaFilename(params.dataset->filename_))
.ok()) {
mode_ = Mode::read;
} else {
mode_ = Mode::write;
}
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return InitializeIterator(ctx);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
return iterator_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kMode, mode_));
return SaveInput(ctx, writer, iterator_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kMode, &temp));
mode_ = static_cast<Mode>(temp);
}
if (mode_ == Mode::write &&
dataset()
->env_->FileExists(MetaFilename(dataset()->filename_))
.ok()) {
LOG(WARNING)
<< "It looks like the cache was already completely written("
<< MetaFilename(dataset()->filename_)
<< ") after the last checkpoint was saved. Attempting to read "
<< "the cache instead of continuing to write. If this is a "
<< "mistake, please remove the above file and try running again.";
mode_ = Mode::read;
}
TF_RETURN_IF_ERROR(InitializeIterator(ctx));
return RestoreInput(ctx, reader, iterator_);
}
private:
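    // Writes elements produced by the input iterator into bundle shards on
    // disk. A lockfile guards against concurrent writers; the shards are
    // merged into the final cache file once iteration completes.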
class FileWriterIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileWriterIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params),
cur_index_(0),
shard_id_(0),
filename_(
strings::StrCat(params.dataset->filename_, "_", shard_id_)),
lockfile_(strings::StrCat(filename_, kLockFileSuffix)),
lockfile_created_(false),
iteration_completed_(false) {}
~FileWriterIterator() override {
if (!dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
LOG(WARNING) << kIncompleteCacheErrorMessage;
std::vector<string> cache_files;
Status s = dataset()->env_->GetMatchingPaths(
strings::StrCat(filename_, "*"), &cache_files);
if (!s.ok()) {
LOG(WARNING) << "Failed to get matching files on " << filename_
<< "* : " << s.ToString();
}
for (const string& path : cache_files) {
s = dataset()->env_->DeleteFile(path);
if (!s.ok()) {
LOG(WARNING) << "Failed to delete " << path << " : "
<< s.ToString();
}
}
}
}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(),
&input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
TF_RETURN_IF_ERROR(EnsureLockFileExists(end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(writer_->status());
if (cur_index_ >= kMaxItems) {
Status s = Finish();
if (!s.ok()) {
LOG(ERROR) << s;
}
return errors::InvalidArgument(
"Upstream iterator is producing more than ", kMaxItems,
" items, which is more than the cache limit.");
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
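        // Input exhausted without producing a final element: finalize the
        // cache now.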
if (*end_of_sequence && out_tensors->empty()) {
TF_RETURN_IF_ERROR(Finish());
cur_index_++;
return absl::OkStatus();
}
if (out_tensors->size() != dataset()->num_tensors_) {
return errors::Internal(
"Upstream iterator returned invalid number of tensors. "
"Expected ",
dataset()->num_tensors_, " got: ", out_tensors->size());
}
size_t tensor_index = 0;
for (const Tensor& t : *out_tensors) {
DCHECK_LT(tensor_index, dataset()->num_tensors_);
string key = dataset()->FormatName(cur_index_, tensor_index++);
TF_RETURN_IF_ERROR(writer_->Add(key, t));
}
if (*end_of_sequence) {
TF_RETURN_IF_ERROR(Finish());
}
cur_index_++;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurIndex, cur_index_));
if (iteration_completed_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kIterationCompleted, ""));
return absl::OkStatus();
}
if (lockfile_created_) {
TF_RETURN_IF_ERROR(writer_->Finish());
shard_id_++;
filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
lockfile_created_ = false;
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kShardId, shard_id_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t temp;
{
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &temp));
cur_index_ = static_cast<size_t>(temp);
if (cur_index_ != temp) {
return errors::Internal("Invalid value for cur_index ", temp);
}
}
if (reader->Contains(prefix(), kIterationCompleted)) {
iteration_completed_ = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
{
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kShardId, &temp));
shard_id_ = static_cast<size_t>(temp);
if (shard_id_ != temp) {
return errors::Internal("Invalid value for shard_id ", temp);
}
}
filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
return absl::OkStatus();
}
private:
Status EnsureLockFileExists(bool* end_of_sequence)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (iteration_completed_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (lockfile_created_) {
return absl::OkStatus();
}
if (dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
return errors::AlreadyExists("Existing cache files found: \n",
MetaFilename(filename_), "\n",
DataFilename(filename_, 0, 1), "\n",
"To continue delete the above files.");
}
if (dataset()->env_->FileExists(lockfile_).ok()) {
char contents_scratch[151] = {0};
StringPiece contents;
std::unique_ptr<RandomAccessFile> file;
if (dataset()->env_->NewRandomAccessFile(lockfile_, &file).ok()) {
file->Read(0, 150, &contents, contents_scratch).IgnoreError();
}
return errors::AlreadyExists(
"There appears to be a concurrent caching iterator running - "
"cache lockfile already exists ('",
lockfile_,
"'). If you are sure no other running TF computations are "
"using this cache prefix, delete the lockfile and "
"re-initialize the iterator. Lockfile contents: ",
contents);
}
std::unique_ptr<WritableFile> lockfile;
TF_RETURN_IF_ERROR(
dataset()->env_->NewWritableFile(lockfile_, &lockfile));
TF_RETURN_IF_ERROR(lockfile->Append(
strings::StrCat(kCreatedAt, ": ", EnvTime::NowSeconds())));
writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
lockfile_created_ = true;
return absl::OkStatus();
}
Status Finish() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
iteration_completed_ = true;
TF_RETURN_IF_ERROR(writer_->Finish());
{
std::vector<tstring> prefixes;
prefixes.reserve(shard_id_ + 1);
for (size_t i = 0; i <= shard_id_; ++i) {
prefixes.emplace_back(
strings::StrCat(dataset()->filename_, "_", i));
}
TF_RETURN_IF_ERROR(
MergeBundles(dataset()->env_, prefixes, dataset()->filename_));
}
for (size_t i = 0; i <= shard_id_; ++i) {
TF_RETURN_IF_ERROR(dataset()->env_->DeleteFile(
strings::StrCat(dataset()->filename_, "_", i, kLockFileSuffix)));
}
return absl::OkStatus();
}
mutex mu_;
size_t cur_index_ TF_GUARDED_BY(mu_);
size_t shard_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
string filename_;
std::unique_ptr<BundleWriter> writer_ TF_GUARDED_BY(mu_);
string lockfile_ TF_GUARDED_BY(mu_);
bool lockfile_created_ TF_GUARDED_BY(mu_);
bool iteration_completed_ TF_GUARDED_BY(mu_);
};
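    // Reads previously cached elements back, in order, from the on-disk bundle.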
class FileReaderIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileReaderIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params),
cur_index_(0),
reader_(dataset()->env_, dataset()->filename_),
iterator_restored_(false) {}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
TF_RETURN_IF_ERROR(reader_.status());
if (!reader_.Valid()) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->resize(dataset()->num_tensors_);
for (size_t i = 0; i < dataset()->num_tensors_; ++i) {
if (!iterator_restored_) {
reader_.Next();
} else {
iterator_restored_ = false;
}
if (!reader_.Valid()) {
out_tensors->clear();
*end_of_sequence = true;
return absl::OkStatus();
}
StringPiece key = reader_.key();
DCHECK_EQ(key, dataset()->FormatName(cur_index_, i));
TF_RETURN_IF_ERROR(reader_.ReadCurrent(&(*out_tensors)[i]));
TF_RETURN_IF_ERROR(reader_.status());
}
cur_index_++;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurIndex, cur_index_));
return absl::OkStatus();
}
Status RestoreInternal(
IteratorContext* ctx,
IteratorStateReader* iterator_state_reader) override {
mutex_lock l(mu_);
{
int64_t temp;
TF_RETURN_IF_ERROR(
iterator_state_reader->ReadScalar(prefix(), kCurIndex, &temp));
cur_index_ = static_cast<size_t>(temp);
if (cur_index_ != temp) {
return errors::Internal("Invalid value for cur_index ", temp);
}
}
if (!reader_.Valid()) {
return errors::Internal("Error initializing BundleReader.");
}
reader_.Seek(dataset()->FormatName(cur_index_, 0));
iterator_restored_ = true;
return absl::OkStatus();
}
private:
mutex mu_;
size_t cur_index_ TF_GUARDED_BY(mu_);
BundleReader reader_ TF_GUARDED_BY(mu_);
bool iterator_restored_ TF_GUARDED_BY(mu_);
};
Status InitializeIterator(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
switch (mode_) {
case Mode::read:
iterator_ =
std::make_unique<FileReaderIterator>(FileReaderIterator::Params{
dataset(), strings::StrCat(prefix(), kImpl)});
break;
case Mode::write:
iterator_ =
std::make_unique<FileWriterIterator>(FileWriterIterator::Params{
dataset(), strings::StrCat(prefix(), kImpl)});
}
TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this));
return iterator_->Initialize(ctx);
}
mutex mu_;
enum Mode { read, write };
Mode mode_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_);
};
Env* const env_;
const size_t num_tensors_;
const size_t tensor_index_padding_size_;
static constexpr size_t kMaxItems = 10000000;
const size_t item_index_padding_size_;
const string tensor_format_string_;
};
class CacheDatasetOp::FileDataset : public CacheDatasetOp::FileDatasetBase {
public:
using FileDatasetBase::FileDatasetBase;
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
Node* filename = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph, filename}, output));
return absl::OkStatus();
}
};
class CacheDatasetOp::FileDatasetV2 : public CacheDatasetOp::FileDatasetBase {
public:
explicit FileDatasetV2(OpKernelContext* ctx, const DatasetBase* input,
string filename, Env* env,
const Tensor& resource_handle)
: FileDatasetBase(ctx, input, filename, env),
resource_handle_(resource_handle) {}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* filename_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename_node));
Node* resource_handle_node = nullptr;
TF_RETURN_IF_ERROR(b->AddTensor(resource_handle_, &resource_handle_node));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_node, filename_node, resource_handle_node}, output));
return absl::OkStatus();
}
private:
const Tensor resource_handle_;
};
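// Base class for the in-memory cache dataset, backed by a MemoryCache that can
// be shared across iterators and serialized into checkpoints.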
class CacheDatasetOp::MemoryDatasetBase : public DatasetBase {
public:
explicit MemoryDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
std::shared_ptr<MemoryCache> cache)
: DatasetBase(DatasetContext(ctx)),
input_(input),
cache_(std::move(cache)) {
input_->Ref();
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
~MemoryDatasetBase() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.dataset_prefix = kMemoryDatasetPrefix;
return std::make_unique<MemoryIterator>(
MemoryIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
cache_.get());
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.dataset_prefix = kMemoryDatasetPrefix;
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
  }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
mutex_lock l(mu_);
CardinalityOptions options;
options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_LOW);
int64_t cardinality = Cardinality(options);
if (cardinality != kUnknownCardinality &&
cardinality != kInfiniteCardinality && index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
if (!dataset_random_access_cache_) {
dataset_random_access_cache_ =
std::make_unique<DatasetRandomAccessCache>(input_);
}
return dataset_random_access_cache_->Get(ctx, index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
mutex_lock l(mu_);
if (!iterator_random_access_cache_) {
iterator_random_access_cache_ =
std::make_unique<IteratorRandomAccessCache>(input_);
}
return iterator_random_access_cache_->Get(ctx, index, out_tensors);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
class MemoryIterator : public DatasetIterator<MemoryDatasetBase> {
public:
explicit MemoryIterator(const Params& params, MemoryCache* cache)
: DatasetIterator<MemoryDatasetBase>(params),
cache_(cache),
global_shuffle_iterator_(dataset()) {}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return InitializeIterator(ctx);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
mutex_lock l(mu_);
return iterator_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (cache_->IsCompleted()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCacheCompleted, ""));
TF_RETURN_IF_ERROR(
WriteElementsToCheckpoint(writer, prefix(), cache_->data()));
}
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return SaveInput(ctx, writer, iterator_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
mutex_lock l(mu_);
iterator_.reset();
cache_->Reset();
if (reader->Contains(prefix(), kCacheCompleted)) {
std::vector<std::vector<Tensor>> temp_cache;
TF_RETURN_IF_ERROR(
ReadElementsFromCheckpoint(ctx, reader, prefix(), &temp_cache));
cache_->Complete(std::move(temp_cache));
}
TF_RETURN_IF_ERROR(InitializeIterator(ctx));
return RestoreInput(ctx, reader, iterator_);
}
private:
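    // Passes elements through from the input iterator while accumulating them
    // in a temporary buffer; the buffer is promoted to the shared cache once
    // the input is exhausted (or its known cardinality is reached).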
class MemoryWriterIterator : public DatasetIterator<MemoryDatasetBase> {
public:
explicit MemoryWriterIterator(const Params& params, MemoryCache* cache)
: DatasetIterator<MemoryDatasetBase>(params), cache_(cache) {}
~MemoryWriterIterator() override {
mutex_lock l(mu_);
if (!temp_cache_.empty() && !cache_->IsCompleted()) {
LOG(WARNING) << kIncompleteCacheErrorMessage;
cache_->Reset();
}
}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(),
&input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
if (!cache_->IsCompleted()) {
VLOG(2) << "Finalizing the cache because EOF has been reached.";
cache_->Complete(std::move(temp_cache_));
}
return absl::OkStatus();
}
RecordBufferEnqueue(ctx, *out_tensors);
temp_cache_.emplace_back(*out_tensors);
if (temp_cache_.size() == dataset()->input_->Cardinality()) {
VLOG(2) << "Finalizing the cache because its size matches the "
"expected input cardinality.";
cache_->Complete(std::move(temp_cache_));
}
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (!cache_->IsCompleted()) {
TF_RETURN_IF_ERROR(
WriteElementsToCheckpoint(writer, prefix(), temp_cache_));
}
return SaveInput(ctx, writer, input_impl_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (!reader->Contains(prefix(), kCacheCompleted)) {
TF_RETURN_IF_ERROR(
ReadElementsFromCheckpoint(ctx, reader, prefix(), &temp_cache_));
}
return RestoreInput(ctx, reader, input_impl_);
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
MemoryCache* const cache_ TF_GUARDED_BY(mu_);
std::vector<std::vector<Tensor>> temp_cache_ TF_GUARDED_BY(mu_);
};
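    // Serves elements directly from the completed in-memory cache.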
class MemoryReaderIterator : public DatasetIterator<MemoryDatasetBase> {
public:
explicit MemoryReaderIterator(const Params& params, MemoryCache* cache)
: DatasetIterator<MemoryDatasetBase>(params),
cache_(cache),
index_(0) {}
Status Initialize(IteratorContext* ctx) override {
tf_shared_lock l(mu_);
for (size_t i = 0; i < cache_->size(); ++i) {
RecordBufferEnqueue(ctx, cache_->at(i));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (index_ < cache_->size()) {
const std::vector<Tensor>& cache_tensors = cache_->at(index_);
out_tensors->insert(out_tensors->begin(), cache_tensors.begin(),
cache_tensors.end());
index_++;
*end_of_sequence = false;
return absl::OkStatus();
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kIndex, index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
{
int64_t temp = cache_->size();
if (reader->Contains(prefix(), kIndex)) {
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kIndex, &temp));
}
index_ = static_cast<size_t>(temp);
}
return absl::OkStatus();
}
private:
mutex mu_;
MemoryCache* const cache_ TF_GUARDED_BY(mu_);
size_t index_ TF_GUARDED_BY(mu_);
};
Status InitializeIterator(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (cache_->IsCompleted()) {
iterator_ = std::make_unique<MemoryReaderIterator>(
MemoryReaderIterator::Params{dataset(),
strings::StrCat(prefix(), kImpl)},
cache_);
} else {
iterator_ = std::make_unique<MemoryWriterIterator>(
MemoryWriterIterator::Params{dataset(),
strings::StrCat(prefix(), kImpl)},
cache_);
}
TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this));
return iterator_->Initialize(ctx);
}
mutex mu_;
MemoryCache* cache_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_);
GlobalShuffleIterator global_shuffle_iterator_;
};
mutable mutex mu_;
const DatasetBase* const input_;
const std::shared_ptr<MemoryCache> cache_;
mutable std::unique_ptr<DatasetRandomAccessCache> dataset_random_access_cache_
TF_GUARDED_BY(mu_);
mutable std::unique_ptr<IteratorRandomAccessCache>
iterator_random_access_cache_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
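// V1 in-memory cache dataset: owns the MemoryCacheManager resource it was
// created with and deletes that resource on destruction.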
class CacheDatasetOp::MemoryDataset : public CacheDatasetOp::MemoryDatasetBase {
public:
MemoryDataset(OpKernelContext* ctx, const DatasetBase* input,
MemoryCacheManager* manager, ResourceHandle&& resource_handle)
: MemoryDatasetBase(ctx, input, manager->get()),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()) {}
~MemoryDataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<MemoryCacheManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete cache resource: " << s.ToString();
}
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* filename_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(tstring(""), &filename_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_node, filename_node}, output));
return absl::OkStatus();
}
private:
MemoryCacheManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
};
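// V2 in-memory cache dataset: takes an explicit cache resource handle and only
// deletes the underlying resource if this dataset created it.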
class CacheDatasetOp::MemoryDatasetV2
: public CacheDatasetOp::MemoryDatasetBase {
public:
MemoryDatasetV2(OpKernelContext* ctx, const DatasetBase* input,
MemoryCacheManager* manager, ResourceHandle&& resource_handle,
bool owns_resource)
: MemoryDatasetBase(ctx, input, manager->get()),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()) {}
~MemoryDatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<MemoryCacheManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete cache resource: " << s.ToString();
}
}
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* filename_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(tstring(""), &filename_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_node, filename_node, resource_handle_node}, output));
return absl::OkStatus();
}
private:
MemoryCacheManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
};
CacheDatasetOp::CacheDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == kCacheDataset ? 1 : 2) {}
void CacheDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
tstring filename;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kFileName, &filename));
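  // An empty filename selects the in-memory cache; otherwise the cache is
  // written to (or read from) files sharing the given filename prefix.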
if (filename.empty()) {
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kMemoryCache, "_",
resource_id_counter.fetch_add(1));
if (op_version_ == 2) {
bool owns_resource = false;
MemoryCacheManager* manager = nullptr;
auto handle = HandleFromInput(ctx, 2);
Status s = ctx->resource_manager()->Lookup<MemoryCacheManager>(
handle.container(), handle.name(), &manager);
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<MemoryCacheManager>(
container, name, &manager, [](MemoryCacheManager** manager) {
*manager = new MemoryCacheManager();
return absl::OkStatus();
}));
handle = MakeResourceHandle<MemoryCacheManager>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new MemoryDatasetV2(ctx, input, manager, std::move(handle),
owns_resource);
} else {
MemoryCacheManager* manager;
OP_REQUIRES_OK(
ctx, ctx->resource_manager()->LookupOrCreate<MemoryCacheManager>(
container, name, &manager, [](MemoryCacheManager** manager) {
*manager = new MemoryCacheManager();
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<MemoryCacheManager>(ctx, container, name);
*output = new MemoryDataset(ctx, input, manager, std::move(handle));
}
} else {
if (op_version_ == 2) {
*output =
new FileDatasetV2(ctx, input, filename, ctx->env(), ctx->input(2));
} else {
*output = new FileDataset(ctx, input, filename, ctx->env());
}
}
}
namespace {
REGISTER_KERNEL_BUILDER(Name("CacheDataset").Device(DEVICE_CPU),
CacheDatasetOp);
REGISTER_KERNEL_BUILDER(Name("CacheDatasetV2").Device(DEVICE_CPU),
CacheDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "cache_dataset";
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMemoryDatasetPrefix[] = "Memory";
class CacheDatasetParams : public DatasetParams {
public:
template <typename T>
CacheDatasetParams(T input_dataset_params, string filename,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
filename_(filename) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor filename_tensor =
CreateTensor<tstring>(TensorShape({}), {filename_});
return {filename_tensor};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {CacheDatasetOp::kInputDataset, CacheDatasetOp::kFileName};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return CacheDatasetOp::kDatasetType; }
string filename() const { return filename_; }
private:
string filename_;
};
class CacheDatasetOpTest : public DatasetOpsTestBase {
public:
Status Initialize(const DatasetParams& dataset_params) {
TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
auto params = static_cast<const CacheDatasetParams&>(dataset_params);
cache_filename_ = params.filename();
return absl::OkStatus();
}
~CacheDatasetOpTest() override {
if (!cache_filename_.empty()) {
std::vector<string> cache_files;
Status s = device_->env()->GetMatchingPaths(
strings::StrCat(cache_filename_, "*"), &cache_files);
if (!s.ok()) {
LOG(WARNING) << "Failed to get matching files on " << cache_filename_
<< "* : " << s.ToString();
}
for (const string& path : cache_files) {
s = device_->env()->DeleteFile(path);
if (!s.ok()) {
LOG(WARNING) << "Failed to delete " << path << " : " << s.ToString();
}
}
}
}
protected:
tstring cache_filename_;
};
CacheDatasetParams CacheDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return CacheDatasetParams(
std::move(tensor_slice_dataset_params),
io::JoinPath(testing::TmpDir(), "cache_data"),
{DT_INT64},
{PartialTensorShape({3, 1})}, kNodeName);
}
CacheDatasetParams CacheDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice");
return CacheDatasetParams(
std::move(tensor_slice_dataset_params),
io::JoinPath(testing::TmpDir(), "cache_data"),
{DT_INT64},
{PartialTensorShape({})}, kNodeName);
}
CacheDatasetParams CacheDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return CacheDatasetParams(std::move(tensor_slice_dataset_params),
"",
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
CacheDatasetParams CacheDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice");
return CacheDatasetParams(std::move(tensor_slice_dataset_params),
"",
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<CacheDatasetParams>> GetNextTestCases() {
return {{CacheDatasetParams1(),
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams2(),
{}},
{CacheDatasetParams3(),
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams4(),
{}}};
}
class ParameterizedGetNextTest : public CacheDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
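  // Create a fresh iterator over the same dataset; the second pass should be
  // served from the cache and yield identical elements.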
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
end_of_sequence = false;
out_tensors.clear();
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(CacheDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(CacheDatasetOpTest, DatasetNodeName) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(CacheDatasetOpTest, DatasetTypeString) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(CacheDatasetOp::kDatasetType)));
}
TEST_F(CacheDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<CacheDatasetParams>>
DatasetOutputShapesTestCases() {
return {{CacheDatasetParams1(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams2(),
{PartialTensorShape({})}},
{CacheDatasetParams3(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams4(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<CacheDatasetParams>> CardinalityTestCases() {
return {{CacheDatasetParams1(),
3},
{CacheDatasetParams2(),
0},
{CacheDatasetParams3(),
3},
{CacheDatasetParams4(),
0}};
}
DATASET_CARDINALITY_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
CardinalityTestCases())
TEST_F(CacheDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<CacheDatasetParams>>
IteratorOutputShapesTestCases() {
return {{CacheDatasetParams1(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams2(),
{PartialTensorShape({})}},
{CacheDatasetParams3(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams4(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(CacheDatasetOpTest, IteratorPrefix) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.dataset_prefix =
cache_filename_.empty() ? kMemoryDatasetPrefix : kFileDatasetPrefix;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
CacheDatasetOp::kDatasetType, dataset_params.iterator_prefix(),
iterator_prefix_params)));
}
std::vector<IteratorSaveAndRestoreTestCase<CacheDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{CacheDatasetParams1(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams2(),
{0, 2, 4, 11},
{}},
{CacheDatasetParams3(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams4(),
{0, 2, 4, 11},
{}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public CacheDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
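  // For the in-memory cache, exhaust the dataset once so the cache is marked
  // complete before exercising save/restore on a fresh iterator.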
if (cache_filename_.empty()) {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
}
end_of_sequence = false;
out_tensors.clear();
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
}
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
int cur_iteration = 0;
auto expected_outputs_it = test_case.expected_outputs.begin();
for (int breakpoint : test_case.breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(out_tensors.back(), *expected_outputs_it));
expected_outputs_it++;
}
cur_iteration++;
}
if (breakpoint >= dataset_->Cardinality()) {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_SUITE_P(CacheDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/cache_dataset_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/cache_dataset_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49ddf85c-a7a2-47ba-973b-ea18e73dc05d | cpp | tensorflow/tensorflow | get_options_op | tensorflow/core/kernels/data/get_options_op.cc | tensorflow/core/kernels/data/get_options_op_test.cc | #include "tensorflow/core/kernels/data/get_options_op.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
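// Emits the serialized Options proto of the input dataset as a scalar string
// tensor.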
void GetOptionsOp::Compute(OpKernelContext* ctx) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
if (ctx->status().ok()) {
Tensor* string_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<tstring>()() = input->options().SerializeAsString();
}
}
string GetOptionsOp::TraceString(const OpKernelContext& ctx,
bool verbose) const {
return tsl::profiler::TraceMeOp(name_view(), type_string_view());
}
namespace {
REGISTER_KERNEL_BUILDER(Name("GetOptions").Device(DEVICE_CPU).Priority(2),
GetOptionsOp);
REGISTER_KERNEL_BUILDER(Name("GetOptions")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("serialized_options")
.Priority(1),
GetOptionsOp);
}
}
} | #include "tensorflow/core/kernels/data/get_options_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
distribute_options {}
)proto";
class GetOptionsParams : public DatasetParams {
public:
template <typename T>
GetOptionsParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(OptionsDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
return absl::OkStatus();
}
string dataset_type() const override { return "GetOptions"; }
string op_name() const override { return dataset_type(); }
private:
string serialized_options_;
};
class GetOptionsOpTest : public DatasetOpsTestBase {};
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
GetOptionsParams GetOptionsParams0() {
return GetOptionsParams(OptionsDatasetParams0(),
{DT_INT64},
{PartialTensorShape({})},
"get_options_0");
}
TEST_F(GetOptionsOpTest, Compute) {
auto test_case_params = GetOptionsParams0();
TF_ASSERT_OK(InitializeRuntime(test_case_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case_params, &output));
EXPECT_EQ(1, output.size());
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
Tensor expected_tensor =
CreateTensor<tstring>(TensorShape({}), {options.SerializeAsString()});
Tensor result_tensor = output[0];
string serialized_options = result_tensor.scalar<tstring>()();
Options result_options;
result_options.ParseFromString(serialized_options);
TF_EXPECT_OK(ExpectEqual(expected_tensor, result_tensor));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/get_options_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/get_options_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5f375a3-9c49-4b88-aed3-8a4b684c5d55 | cpp | tensorflow/tensorflow | sparse_tensor_slice_dataset_op | tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc | tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op_test.cc | #include <numeric>
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
namespace data {
namespace {
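// Dataset that slices a SparseTensor along its first dimension, producing one
// (indices, values, dense_shape) triple per row.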
template <typename T>
class Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx,
const sparse::SparseTensor& sparse_tensor)
: DatasetBase(DatasetContext(ctx)),
sparse_tensor_(sparse_tensor),
dtypes_({DT_INT64, sparse_tensor.dtype(), DT_INT64}),
shapes_({{-1, sparse_tensor.dims() - 1},
{-1},
{sparse_tensor.dims() - 1}}) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(typename Iterator::Params{
this, strings::StrCat(prefix, "::SparseTensorSlice")});
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
string DebugString() const override {
return "SparseTensorSliceDatasetOp::Dataset";
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return sparse_tensor_.shape()[0];
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* indices_node;
TF_RETURN_IF_ERROR(b->AddTensor(sparse_tensor_.indices(), &indices_node));
Node* value_node;
TF_RETURN_IF_ERROR(b->AddTensor(sparse_tensor_.values(), &value_node));
Node* dense_shape_node;
std::vector<int64_t> dense_shape;
dense_shape.reserve(sparse_tensor_.shape().size());
for (int i = 0; i < sparse_tensor_.shape().size(); i++)
dense_shape.emplace_back(sparse_tensor_.shape()[i]);
TF_RETURN_IF_ERROR(b->AddVector(dense_shape, &dense_shape_node));
AttrValue val_dtype;
b->BuildAttrValue(sparse_tensor_.dtype(), &val_dtype);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {indices_node, value_node, dense_shape_node},
{{"Tvalues", val_dtype}}, output));
return absl::OkStatus();
}
private:
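// Iterates over the SparseTensor grouped by the batch dimension; slices with
// no entries are emitted as zero-row indices/values tensors.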
class Iterator : public DatasetIterator<Dataset<T>> {
public:
explicit Iterator(const typename Iterator::Params& params)
: DatasetIterator<Dataset<T>>(params),
num_elements_(params.dataset->sparse_tensor_.shape()[0]),
dense_shape_(DT_INT64, {params.dataset->sparse_tensor_.dims() - 1}),
group_iterable_(params.dataset->sparse_tensor_.group({0})),
iter_(group_iterable_.begin()) {
for (size_t i = 0; i < dense_shape_.NumElements(); ++i) {
dense_shape_.vec<int64_t>()(i) =
params.dataset->sparse_tensor_.shape()[i + 1];
}
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (i_ == num_elements_) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->reserve(3);
const int rank = Iterator::dataset()->sparse_tensor_.dims();
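// Advance the group iterator when the cached non-empty slice has been
// consumed (or is still unknown), and stash its batch index, indices and
// values for emission once i_ reaches that batch position.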
if (i_ > next_non_empty_i_ && iter_ != group_iterable_.end()) {
sparse::Group group = *iter_;
const auto indices = group.indices();
const auto values = group.values<T>();
const int64_t num_entries = values.size();
next_non_empty_i_ = indices(0, 0);
next_indices_ = Tensor(DT_INT64, {num_entries, rank - 1});
next_values_ = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto next_indices_t = next_indices_.matrix<int64_t>();
auto next_values_t = next_values_.vec<T>();
for (int64_t i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
next_indices_t(i, d - 1) = indices(i, d);
}
next_values_t(i) = values(i);
}
++iter_;
}
if (i_ == next_non_empty_i_) {
out_tensors->push_back(std::move(next_indices_));
out_tensors->push_back(std::move(next_values_));
out_tensors->push_back(dense_shape_);
next_non_empty_i_ = kNextNonEmptyUnknown;
} else {
DCHECK(i_ < next_non_empty_i_ || iter_ == group_iterable_.end());
out_tensors->push_back(Tensor(DT_INT64, TensorShape({0, rank - 1})));
out_tensors->push_back(Tensor(DataTypeToEnum<T>::value, {0}));
out_tensors->push_back(dense_shape_);
}
++i_;
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(Iterator::prefix(), "i", i_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(Iterator::prefix(), "iter_loc", iter_.loc()));
TF_RETURN_IF_ERROR(writer->WriteScalar(
Iterator::prefix(), "next_non_empty_i_", next_non_empty_i_));
if (i_ <= next_non_empty_i_) {
TF_RETURN_IF_ERROR(writer->WriteTensor(Iterator::prefix(),
"next_indices_", next_indices_));
TF_RETURN_IF_ERROR(writer->WriteTensor(Iterator::prefix(),
"next_values_", next_values_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(Iterator::prefix(), "i", &i_));
int64_t iter_loc;
TF_RETURN_IF_ERROR(
reader->ReadScalar(Iterator::prefix(), "iter_loc", &iter_loc));
iter_ = group_iterable_.at(iter_loc);
TF_RETURN_IF_ERROR(reader->ReadScalar(
Iterator::prefix(), "next_non_empty_i_", &next_non_empty_i_));
if (i_ <= next_non_empty_i_) {
TF_RETURN_IF_ERROR(reader->ReadTensor(Iterator::prefix(),
"next_indices_", &next_indices_));
TF_RETURN_IF_ERROR(reader->ReadTensor(Iterator::prefix(),
"next_values_", &next_values_));
}
return absl::OkStatus();
}
private:
const int64_t num_elements_;
Tensor dense_shape_;
mutex mu_;
sparse::GroupIterable group_iterable_ TF_GUARDED_BY(mu_);
sparse::GroupIterable::IteratorStep iter_ TF_GUARDED_BY(mu_);
int64_t i_ TF_GUARDED_BY(mu_) = 0;
const int64_t kNextNonEmptyUnknown = -1;
int64_t next_non_empty_i_ TF_GUARDED_BY(mu_) = kNextNonEmptyUnknown;
Tensor next_indices_ TF_GUARDED_BY(mu_);
Tensor next_values_ TF_GUARDED_BY(mu_);
};
const sparse::SparseTensor sparse_tensor_;
const DataTypeVector dtypes_;
const std::vector<PartialTensorShape> shapes_;
};
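// Kernel that validates the (indices, values, dense_shape) inputs, requires
// the indices to be ordered along the batch dimension, and wraps them in a
// Dataset<T>.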
template <typename T>
class SparseTensorSliceDatasetOp : public DatasetOpKernel {
public:
explicit SparseTensorSliceDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {}
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument("Input indices must be a matrix. Got: ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument("Input values must be a vector. Got: ",
values->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument("Input shape must be a vector. Got: ",
dense_shape->shape().DebugString()));
OP_REQUIRES(
ctx, values->shape().dim_size(0) == indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
values->shape().dim_size(0),
" values, indices shape: ", indices->shape().DebugString()));
OP_REQUIRES(
ctx, dense_shape->shape().dim_size(0) == indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", dense_shape->shape().dim_size(0),
" dimensions, indices shape: ", indices->shape().DebugString()));
OP_REQUIRES(ctx, dense_shape->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64_t>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
absl::InlinedVector<int64_t, 8UL> std_order(dense_shape->NumElements(), 0);
TensorShape shape;
OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(
dense_shape->vec<int64_t>(), &shape));
sparse::SparseTensor tensor;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(*indices, *values, shape,
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
private:
};
#define REGISTER_DATASET_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("SparseTensorSliceDataset") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("Tvalues"), \
SparseTensorSliceDatasetOp<type>);
TF_CALL_DATASET_TYPES(REGISTER_DATASET_KERNEL);
#undef REGISTER_DATASET_KERNEL
}
}
} | #include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "sparse_tensor_slice_dataset";
constexpr char kDatasetType[] = "SparseTensorSlice";
class SparseTensorSliceDatasetParams : public DatasetParams {
public:
SparseTensorSliceDatasetParams(Tensor indices, Tensor values,
Tensor dense_shape, DataType tvalues,
string node_name)
: DatasetParams({tvalues}, {PartialTensorShape({})},
std::move(node_name)),
indices_(std::move(indices)),
values_(std::move(values)),
dense_shape_(std::move(dense_shape)),
tvalues_(tvalues) {
iterator_prefix_ = "Iterator";
}
std::vector<Tensor> GetInputTensors() const override {
return {indices_, values_, dense_shape_};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back("indices");
input_names->emplace_back("values");
input_names->emplace_back("dense_shape");
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("Tvalues", tvalues_);
return absl::OkStatus();
}
string dataset_type() const override { return kDatasetType; }
private:
Tensor indices_;
Tensor values_;
Tensor dense_shape_;
DataType tvalues_;
};
class SparseTensorSliceDatasetOpTest : public DatasetOpsTestBase {};
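// Fixtures cover sparse tensors of rank 2 through 5, each with two non-zero
// entries located at the all-zeros and all-ones index positions.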
SparseTensorSliceDatasetParams TwoDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 2}, {0, 0, 1, 1}),
CreateTensor<int32>({2}, {888, 999}),
CreateTensor<int64_t>({2}, {2, 2}),
DT_INT32,
kNodeName);
}
SparseTensorSliceDatasetParams ThreeDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 3}, {0, 0, 0, 1, 1, 1}),
CreateTensor<double>({2}, {888.0, 999.0}),
CreateTensor<int64_t>({3}, {2, 2, 2}),
DT_DOUBLE,
kNodeName);
}
SparseTensorSliceDatasetParams FourDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 4}, {0, 0, 0, 0, 1, 1, 1, 1}),
CreateTensor<tstring>({2}, {"a", "b"}),
CreateTensor<int64_t>({4}, {3, 2, 2, 2}),
DT_STRING,
kNodeName);
}
SparseTensorSliceDatasetParams FiveDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 5}, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}),
CreateTensor<int32>({2}, {888, 999}),
CreateTensor<int64_t>({5}, {3, 2, 2, 2, 2}),
DT_INT32,
kNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<GetNextTestCase<SparseTensorSliceDatasetParams>>
GetNextTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 1}, {0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({1}, {2})},
{ CreateTensor<int64_t>({1, 1}, {1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({1}, {2})}}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 2}, {0, 0}),
CreateTensor<double>({1}, {888.0}),
CreateTensor<int64_t>({2}, {2, 2})},
{ CreateTensor<int64_t>({1, 2}, {1, 1}),
CreateTensor<double>({1}, {999.0}),
CreateTensor<int64_t>({2}, {2, 2})}}},
{FourDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 3}, {0, 0, 0}),
CreateTensor<tstring>({1}, {"a"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({1, 3}, {1, 1, 1}),
CreateTensor<tstring>({1}, {"b"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({0, 3}, {}),
CreateTensor<tstring>({0}, {}),
CreateTensor<int64_t>({3}, {2, 2, 2})}}},
{FiveDimsSparseTensorSliceDatasetParams(),
{
{ CreateTensor<int64_t>({1, 4}, {0, 0, 0, 0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({1, 4}, {1, 1, 1, 1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({0, 4}, {}),
CreateTensor<int32>({0}, {}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})}}}};
}
class ParameterizedGetNextTest
: public SparseTensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<SparseTensorSliceDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
auto expected_outputs_it = test_case.expected_outputs.begin();
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
TF_EXPECT_OK(ExpectEqual(out_tensors[0], expected_outputs_it->at(0)));
TF_EXPECT_OK(ExpectEqual(out_tensors[1], expected_outputs_it->at(1)));
TF_EXPECT_OK(ExpectEqual(out_tensors[2], expected_outputs_it->at(2)));
expected_outputs_it++;
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(SparseTensorSliceDatasetOpTest,
ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(SparseTensorSliceDatasetOpTest, DatasetTypeString) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(name_utils::OpName(kDatasetType)));
}
TEST_F(SparseTensorSliceDatasetOpTest, DatasetNodeName) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
std::vector<DatasetOutputDtypesTestCase<SparseTensorSliceDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_DOUBLE, DT_INT64}},
{FourDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_STRING, DT_INT64}},
{FiveDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SparseTensorSliceDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 1}),
PartialTensorShape({1}),
PartialTensorShape({1})}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 2}),
PartialTensorShape({1}),
PartialTensorShape({2})}},
{FourDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 3}),
PartialTensorShape({1}),
PartialTensorShape({3})}},
{FiveDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 4}),
PartialTensorShape({1}),
PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SparseTensorSliceDatasetParams>>
CardinalityTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
2},
{ThreeDimsSparseTensorSliceDatasetParams(),
2},
{FourDimsSparseTensorSliceDatasetParams(),
3},
{FiveDimsSparseTensorSliceDatasetParams(),
3}};
}
DATASET_CARDINALITY_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<SparseTensorSliceDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_DOUBLE, DT_INT64}},
{FourDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_STRING, DT_INT64}},
{FiveDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<SparseTensorSliceDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 1}),
PartialTensorShape({1}),
PartialTensorShape({1})}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 2}),
PartialTensorShape({1}),
PartialTensorShape({2})}},
{FourDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 3}),
PartialTensorShape({1}),
PartialTensorShape({3})}},
{FiveDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 4}),
PartialTensorShape({1}),
PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(SparseTensorSliceDatasetOpTest, IteratorPrefix) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<SparseTensorSliceDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 1}, {0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({1}, {2})},
{ CreateTensor<int64_t>({1, 1}, {1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({1}, {2})}}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 2}, {0, 0}),
CreateTensor<double>({1}, {888.0}),
CreateTensor<int64_t>({2}, {2, 2})},
{ CreateTensor<int64_t>({1, 2}, {1, 1}),
CreateTensor<double>({1}, {999.0}),
CreateTensor<int64_t>({2}, {2, 2})}}},
{FourDimsSparseTensorSliceDatasetParams(),
{0, 1, 3},
{{ CreateTensor<int64_t>({1, 3}, {0, 0, 0}),
CreateTensor<tstring>({1}, {"a"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({1, 3}, {1, 1, 1}),
CreateTensor<tstring>({1}, {"b"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({0, 3}, {}),
CreateTensor<tstring>({0}, {}),
CreateTensor<int64_t>({3}, {2, 2, 2})}}},
{FiveDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 4}, {0, 0, 0, 0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({1, 4}, {1, 1, 1, 1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({0, 4}, {}),
CreateTensor<int32>({0}, {}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})}}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public SparseTensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<SparseTensorSliceDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
int cur_iteration = 0;
bool end_of_sequence = false;
int64_t num_slices = dataset_->Cardinality();
std::vector<Tensor> out_tensors;
for (int breakpoint : test_case.breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_slices) {
for (int i = 0; i < out_tensors.size(); ++i) {
TF_EXPECT_OK(ExpectEqual(
out_tensors[0], test_case.expected_outputs[cur_iteration - 1][0]));
TF_EXPECT_OK(ExpectEqual(
out_tensors[1], test_case.expected_outputs[cur_iteration - 1][1]));
TF_EXPECT_OK(ExpectEqual(
out_tensors[2], test_case.expected_outputs[cur_iteration - 1][2]));
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_CASE_P(SparseTensorSliceDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff9599d4-95bd-458f-a1e1-7cabad607e71 | cpp | tensorflow/tensorflow | take_dataset_op | tensorflow/core/kernels/data/take_dataset_op.cc | tensorflow/core/kernels/data/take_dataset_op_test.cc | #include "tensorflow/core/kernels/data/take_dataset_op.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
constexpr const char* const TakeDatasetOp::kDatasetType;
constexpr const char* const TakeDatasetOp::kInputDataset;
constexpr const char* const TakeDatasetOp::kCount;
constexpr const char* const TakeDatasetOp::kOutputTypes;
constexpr const char* const TakeDatasetOp::kOutputShapes;
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kEmptyTake[] = "EmptyTake";
constexpr char kFiniteTake[] = "FiniteTake";
TakeDataset::TakeDataset(OpKernelContext* ctx, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
}
TakeDataset::TakeDataset(DatasetContext::Params params, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(std::move(params))),
count_(count),
input_(input) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
TakeDataset::~TakeDataset() { input_->Unref(); }
const DataTypeVector& TakeDataset::output_dtypes() const {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& TakeDataset::output_shapes() const {
return input_->output_shapes();
}
string TakeDataset::DebugString() const {
return name_utils::DatasetDebugString(TakeDatasetOp::kDatasetType);
}
int64_t TakeDataset::CardinalityInternal(CardinalityOptions options) const {
int64_t n = input_->Cardinality(options);
if (n == kUnknownCardinality) {
return kUnknownCardinality;
}
if (n == kInfiniteCardinality) {
return count_;
} else if (count_ == kInfiniteCardinality) {
return n;
}
return std::min(n, count_);
}
Status TakeDataset::InputDatasets(
std::vector<const DatasetBase*>* inputs) const {
inputs->push_back(input_);
return absl::OkStatus();
}
Status TakeDataset::CheckExternalState() const {
return input_->CheckExternalState();
}
Status TakeDataset::Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index, out_tensors);
}
absl::Status TakeDataset::RandomIndexingCompatible() const {
return random_indexing_compatible_;
}
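// Iterator used when count == 0: it reports end of sequence immediately.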
class TakeDataset::EmptyIterator : public DatasetIterator<TakeDataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<TakeDataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
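// Iterator used for a non-zero count: it forwards input elements until
// count_ elements have been produced (a negative count forwards everything).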
class TakeDataset::FiniteIterator : public DatasetIterator<TakeDataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<TakeDataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (dataset()->count_ < 0 || i_ < dataset()->count_) {
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (!*end_of_sequence) {
++i_;
return absl::OkStatus();
}
break;
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputImplEmpty,
static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
std::unique_ptr<IteratorBase> TakeDataset::MakeIteratorInternal(
const string& prefix) const {
if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyTake, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteTake, prefix)});
}
}
Status TakeDataset::AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
TakeDatasetOp::TakeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void TakeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new TakeDataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TakeDataset").Device(DEVICE_CPU), TakeDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "take_dataset";
class TakeDatasetOpTest : public DatasetOpsTestBase {};
TakeDatasetParams TakeLessTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
4,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
TakeDatasetParams TakeMoreTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
25,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
TakeDatasetParams TakeAllTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
-1,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
TakeDatasetParams TakeNothingTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
0,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<TakeDatasetParams>> GetNextTestCases() {
return {{TakeLessTakeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
{TakeMoreTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeAllTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeNothingTakeDatasetParams(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
GetNextTestCases())
TEST_F(TakeDatasetOpTest, DatasetNodeName) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TakeDatasetOpTest, DatasetTypeString) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(TakeDatasetOp::kDatasetType)));
}
TEST_F(TakeDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<TakeDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TakeDatasetParams>> CardinalityTestCases() {
return {{TakeLessTakeDatasetParams(),
4},
{TakeMoreTakeDatasetParams(),
10},
{TakeAllTakeDatasetParams(),
10},
{TakeNothingTakeDatasetParams(),
0}};
}
DATASET_CARDINALITY_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
CardinalityTestCases())
TEST_F(TakeDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<TakeDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<TakeDatasetParams>>
IteratorPrefixTestCases() {
return {{TakeLessTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeLessTakeDatasetParams().iterator_prefix())},
{TakeMoreTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeMoreTakeDatasetParams().iterator_prefix())},
{TakeAllTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeAllTakeDatasetParams().iterator_prefix())},
{TakeNothingTakeDatasetParams(),
name_utils::IteratorPrefix(
"EmptyTake", TakeNothingTakeDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<TakeDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TakeLessTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
{TakeMoreTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeAllTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeNothingTakeDatasetParams(),
{0, 2, 5, 11},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/take_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/take_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a598589-1747-42c2-8c3f-343cd0131275 | cpp | tensorflow/tensorflow | optimize_dataset_op | tensorflow/core/kernels/data/optimize_dataset_op.cc | tensorflow/core/kernels/data/optimize_dataset_op_test.cc | #include "tensorflow/core/kernels/data/optimize_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const OptimizeDatasetOp::kDatasetType;
constexpr const char* const OptimizeDatasetOp::kInputDataset;
constexpr const char* const OptimizeDatasetOp::kOptimizations;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsEnabled;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsDisabled;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsDefault;
constexpr const char* const OptimizeDatasetOp::kOutputTypes;
constexpr const char* const OptimizeDatasetOp::kOutputShapes;
constexpr const char* const
OptimizeDatasetOp::kOptimizationConfigs;
constexpr const char* const OptimizeDatasetOp::kOptimizeDatasetV1;
constexpr const char* const OptimizeDatasetOp::kOptimizeDatasetV2;
namespace {
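// Shared helper: always applies the graduated experiments, then rewrites the
// input dataset with the selected optimizations; if the rewrite hits its
// deadline, the original input is returned unchanged.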
void MakeDatasetHelper(OpKernelContext* ctx,
absl::flat_hash_set<tstring>& optimizations,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase* input, DatasetBase** output) {
std::vector<string> graduated_experiments = {
"disable_intra_op_parallelism",
"use_private_thread_pool"
};
for (auto& experiment : graduated_experiments) {
if (!optimizations.contains(experiment)) {
optimizations.insert(experiment);
}
VLOG(1) << "The graduated experiment \"" << experiment << "\" is applied.";
}
if (optimizations.empty()) {
*output = input;
input->Ref();
return;
}
auto config_factory = [&optimizations, &optimization_configs]() {
return CreateRewriterConfig(optimizations, optimization_configs);
};
core::RefCountPtr<DatasetBase> rewritten;
Status s = RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten);
*output = rewritten.release();
if (errors::IsDeadlineExceeded(s)) {
LOG(WARNING) << s.ToString();
*output = input;
input->Ref();
return;
}
OP_REQUIRES_OK(ctx, s);
}
}
void OptimizeDatasetOp::MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output) {
auto experiments = GetExperiments();
LogAndRecordExperiments(experiments);
auto optimizations =
SelectOptimizations(experiments, optimizations_enabled,
optimizations_disabled, optimizations_default);
MakeDatasetHelper(ctx, optimizations, optimization_configs, input, output);
}
OptimizeDatasetOp::OptimizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kOptimizeDatasetV1) {
op_version_ = 1;
} else if (op_name == kOptimizeDatasetV2) {
op_version_ = 2;
}
std::vector<tstring> optimization_configs;
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kOptimizationConfigs, &optimization_configs));
optimization_configs_.insert(optimization_configs.begin(),
optimization_configs.end());
}
void OptimizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
absl::flat_hash_set<tstring> optimizations;
if (op_version_ == 1) {
std::vector<tstring> optimizations_enabled;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizations,
&optimizations_enabled));
optimizations.insert(optimizations_enabled.begin(),
optimizations_enabled.end());
} else if (op_version_ == 2) {
std::vector<tstring> optimizations_enabled, optimizations_disabled,
optimizations_default;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizationsEnabled,
&optimizations_enabled));
OP_REQUIRES_OK(ctx,
ParseVectorArgument<tstring>(ctx, kOptimizationsDisabled,
&optimizations_disabled));
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizationsDefault,
&optimizations_default));
auto experiments = GetExperiments();
LogAndRecordExperiments(experiments);
optimizations = SelectOptimizations(
experiments,
{optimizations_enabled.begin(), optimizations_enabled.end()},
{optimizations_disabled.begin(), optimizations_disabled.end()},
{optimizations_default.begin(), optimizations_default.end()});
}
MakeDatasetHelper(
ctx, optimizations,
{optimization_configs_.begin(), optimization_configs_.end()}, input,
output);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptimizeDataset").Device(DEVICE_CPU),
OptimizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptimizeDatasetV2").Device(DEVICE_CPU),
OptimizeDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
void OptimizeDatasetOp::MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output) {
input->Ref();
*output = input;
}
OptimizeDatasetOp::OptimizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void OptimizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
input->Ref();
*output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptimizeDataset").Device(DEVICE_CPU),
OptimizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptimizeDatasetV2").Device(DEVICE_CPU),
OptimizeDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/optimize_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "optimize_dataset";
constexpr char kNoopElimination[] = "noop_elimination";
class OptimizeDatasetParams : public DatasetParams {
public:
template <typename T>
OptimizeDatasetParams(T input_dataset_params, string optimizations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
std::vector<tstring> optimization_configs,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
optimizations_(std::move(optimizations)),
optimization_configs_(std::move(optimization_configs)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<tstring>(TensorShape({1}), {optimizations_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {OptimizeDatasetOp::kInputDataset,
OptimizeDatasetOp::kOptimizations};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{OptimizeDatasetOp::kOutputShapes, output_shapes_},
{OptimizeDatasetOp::kOutputTypes, output_dtypes_},
{OptimizeDatasetOp::kOptimizationConfigs, optimization_configs_}};
return absl::OkStatus();
}
string dataset_type() const override {
return OptimizeDatasetOp::kDatasetType;
}
private:
string optimizations_;
std::vector<tstring> optimization_configs_;
};
class OptimizeDatasetOpTest : public DatasetOpsTestBase {};
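// noop_elimination should leave the element stream unchanged: take(-1) over
// range(-3, 3) still yields -3..2.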
TEST_F(OptimizeDatasetOpTest, NoopElimination) {
auto take_dataset_params =
TakeDatasetParams(RangeDatasetParams(-3, 3, 1),
-3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
auto optimize_dataset_params =
OptimizeDatasetParams(std::move(take_dataset_params),
{kNoopElimination},
{DT_INT64},
{PartialTensorShape({})},
{},
kNodeName);
std::vector<Tensor> expected_outputs = CreateTensors<int64_t>(
TensorShape({}), {{-3}, {-2}, {-1}, {0}, {1}, {2}});
TF_ASSERT_OK(Initialize(optimize_dataset_params));
TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/optimize_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/optimize_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76df2d86-6f89-4427-bdce-fbb8accb3eda | cpp | tensorflow/tensorflow | prefetch_autotuner | tensorflow/core/kernels/data/prefetch_autotuner.cc | tensorflow/core/kernels/data/prefetch_autotuner_test.cc | #include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include <cstdint>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/model.h"
namespace tensorflow {
namespace data {
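// The autotuner starts in kUpswing when the buffer size is autotuned. Each
// time the buffer fills up and then drains to empty, the limit doubles (or
// grows by kBufferLimitThreshold once large), subject to the RAM budget.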
PrefetchAutotuner::PrefetchAutotuner(
int64_t initial_buffer_size, int64_t buffer_size_min,
std::shared_ptr<model::RamBudgetManager> ram_budget_manager)
: buffer_limit_(initial_buffer_size),
ram_budget_manager_(ram_budget_manager) {
if (initial_buffer_size == model::kAutotune) {
mode_ = Mode::kUpswing;
buffer_limit_ = std::max(int64_t{1}, buffer_size_min);
}
}
namespace {
size_t kBufferLimitThreshold = 2048;
}
void PrefetchAutotuner::SetElementSize(int64_t element_size_bytes) {
if (ram_budget_manager_ && !ram_budget_manager_->RequestLegacyPrefetchBytes(
element_size_bytes * buffer_limit_)) {
LOG(WARNING)
<< "Prefetch autotuner tried to allocate "
<< element_size_bytes * buffer_limit_ << " bytes "
<< "after encountering the first element of size " << element_size_bytes
<< " bytes."
<< "This already causes the autotune ram budget to be exceeded. To "
<< "stay within the ram budget, either increase the ram budget or "
<< "reduce element size";
}
element_size_bytes_ = element_size_bytes;
}
void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) {
switch (mode_) {
case Mode::kDisabled:
return;
case Mode::kUpswing:
if (static_cast<int64_t>(current_buffer_size) == buffer_limit_) {
mode_ = Mode::kDownswing;
}
return;
case Mode::kDownswing:
if (current_buffer_size == 0) {
if (!element_size_bytes_.has_value()) {
return;
}
int64_t element_size_bytes = *element_size_bytes_;
int64_t attempt_new_buffer_limit;
if (buffer_limit_ >= static_cast<int64_t>(kBufferLimitThreshold)) {
attempt_new_buffer_limit = buffer_limit_ + kBufferLimitThreshold;
} else {
attempt_new_buffer_limit = buffer_limit_ * 2;
}
int64_t delta_bytes =
(attempt_new_buffer_limit - buffer_limit_) * element_size_bytes;
if (!ram_budget_manager_ ||
ram_budget_manager_->RequestLegacyPrefetchBytes(delta_bytes)) {
buffer_limit_ = attempt_new_buffer_limit;
}
mode_ = Mode::kUpswing;
}
return;
}
}
}
} | #include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include <vector>
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(PrefetchAutotuner, Disabled) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(2, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
t.RecordConsumption(2);
t.RecordConsumption(0);
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
}
TEST(PrefetchAutotuner, Enabled) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
}
TEST(PrefetchAutotuner, EnabledSteady) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
std::vector<size_t> consumption_values = {2, 3, 1, 4, 1, 2, 3, 1};
for (int i = 0; i < consumption_values.size(); ++i) {
t.RecordConsumption(consumption_values[i]);
EXPECT_EQ(4, t.buffer_limit())
<< "Failed at index " << i << " with value: " << consumption_values[i];
}
}
TEST(PrefetchAutotuner, StartWithMin) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
std::vector<size_t> consumption_values = {3, 5, 7, 1, 4, 6, 8, 3, 5, 1, 2, 4};
for (int i = 0; i < consumption_values.size(); ++i) {
t.RecordConsumption(consumption_values[i]);
EXPECT_EQ(8, t.buffer_limit())
<< "Failed at index " << i << " with value: " << consumption_values[i];
}
}
TEST(PrefetchAutotuner, RespectRamManager) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(200);
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(50);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
}
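// With a model allocation reserved, prefetch growth stops at the same
// 200-byte legacy budget; releasing the allocation lets the limit resume
// doubling.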
TEST(PrefetchAutotuner, RespectRamManagerWhenThereIsModelAllocation) {
int64_t model_allocation = 100000;
auto ram_manager = std::make_shared<model::RamBudgetManager>(
200 + model_allocation);
ASSERT_TRUE(ram_manager->RequestModelAllocation(model_allocation));
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(50);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
ASSERT_TRUE(ram_manager->RequestModelAllocation(0));
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(8);
t.RecordConsumption(0);
EXPECT_EQ(16, t.buffer_limit());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_autotuner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_autotuner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a753d44-009b-4e57-92f1-ff59a6d25495 | cpp | tensorflow/tensorflow | skip_dataset_op | tensorflow/core/kernels/data/skip_dataset_op.cc | tensorflow/core/kernels/data/skip_dataset_op_test.cc | #include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include <cstddef>
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const SkipDatasetOp::kDatasetType;
constexpr const char* const SkipDatasetOp::kInputDataset;
constexpr const char* const SkipDatasetOp::kCount;
constexpr const char* const SkipDatasetOp::kOutputTypes;
constexpr const char* const SkipDatasetOp::kOutputShapes;
constexpr char kEmptySkip[] = "EmptySkip";
constexpr char kFiniteSkip[] = "FiniteSkip";
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
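// SkipDataset drops the first count_ elements of its input; a negative count
// skips everything and yields an empty dataset.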
class SkipDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && count >= 0) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("Global shuffling does not support empty dataset or "
"skipping the entire dataset. Got skip(",
count, ")."));
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptySkip, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteSkip, prefix)});
}
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ < 0 ? 0 : std::max(int64_t{0}, n - count_);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index + count_, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
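// FiniteIterator lazily skips count_ input elements on the first GetNext
// call, then passes elements through; under global shuffling it instead
// remaps element indices by adding count_.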
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (i_ < dataset()->count_) {
int num_skipped;
TF_RETURN_IF_ERROR(input_impl_->Skip(ctx, dataset()->count_ - i_,
end_of_sequence, &num_skipped));
i_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
}
return absl::OkStatus();
}
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
return absl::OkStatus();
}
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t skip_count = dataset()->count_;
return [parent_index_mapper,
skip_count](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(element_position));
return shuffled_element_position + skip_count;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
mutex_lock l(mu_);
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
SkipDatasetOp::SkipDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void SkipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SkipDataset").Device(DEVICE_CPU), SkipDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "skip_dataset";
class SkipDatasetParams : public DatasetParams {
public:
template <typename T>
SkipDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SkipDatasetOp::kInputDataset);
input_names->emplace_back(SkipDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return SkipDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class SkipDatasetOpTest : public DatasetOpsTestBase {};
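// The five parameter sets below cover: a partial skip (4 of 10 elements),
// skipping past the end (25 of 10), skipping exactly all elements, skipping
// nothing, and skipping everything via count = -1.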
SkipDatasetParams SkipDatasetParams1() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
4,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams2() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
25,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams3() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
10,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams4() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
0,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams5() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
-1,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<SkipDatasetParams>> GetNextTestCases() {
return {
{SkipDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams2(),
{}},
{SkipDatasetParams3(),
{}},
{SkipDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams5(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
GetNextTestCases())
TEST_F(SkipDatasetOpTest, DatasetNodeName) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SkipDatasetOpTest, DatasetTypeString) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(SkipDatasetOp::kDatasetType)));
}
TEST_F(SkipDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<SkipDatasetParams>> CardinalityTestCases() {
return {{SkipDatasetParams1(),
6},
{SkipDatasetParams2(),
0},
{SkipDatasetParams3(),
0},
{SkipDatasetParams4(),
10},
{SkipDatasetParams5(),
0}};
}
DATASET_CARDINALITY_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
CardinalityTestCases())
TEST_F(SkipDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
std::vector<IteratorPrefixTestCase<SkipDatasetParams>>
IteratorPrefixTestCases() {
return {{SkipDatasetParams1(),
name_utils::IteratorPrefix("FiniteSkip",
SkipDatasetParams1().iterator_prefix())},
{SkipDatasetParams2(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams2().iterator_prefix())},
{SkipDatasetParams3(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams3().iterator_prefix())},
{SkipDatasetParams4(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams4().iterator_prefix())},
{SkipDatasetParams5(),
name_utils::IteratorPrefix(
"EmptySkip", SkipDatasetParams5().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<SkipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{SkipDatasetParams1(),
{0, 2, 7},
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams2(),
{0, 2, 5},
{}},
{SkipDatasetParams3(),
{0, 2, 5},
{}},
{SkipDatasetParams4(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams5(),
{0, 2, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/skip_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/skip_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
332dbf76-b2f0-4a9f-8282-433353f67b1c | cpp | tensorflow/tensorflow | options_dataset_op | tensorflow/core/kernels/data/options_dataset_op.cc | tensorflow/core/kernels/data/options_dataset_op_test.cc | #include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
constexpr const char* const OptionsDatasetOp::kDatasetType;
constexpr const char* const OptionsDatasetOp::kInputDataset;
constexpr const char* const OptionsDatasetOp::kOutputTypes;
constexpr const char* const OptionsDatasetOp::kOutputShapes;
constexpr const char* const OptionsDatasetOp::kSerializedOptions;
class OptionsDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const string& serialized_options)
: DatasetBase(DatasetContext(ctx)),
input_(input),
serialized_options_(serialized_options) {
input_->Ref();
Options options;
OP_REQUIRES(ctx, options.ParseFromString(serialized_options),
errors::InvalidArgument(absl::StrCat(
"Could not parse ", OptionsDatasetOp::kSerializedOptions,
" as valid Options.")));
set_options(options);
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
DCHECK(false) << "OptionsDatasetOp::Dataset::MakeIteratorInternal is not "
"expected to be called because it is supposed to forward "
"the iterator to its input dataset(s).";
LOG(ERROR) << "Datasets of type " << type_string()
<< " forwards its iterator to its input dataset. "
"`MakeIteratorInternal` is not implemented.";
return nullptr;
}
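  // OptionsDataset is a pass-through transformation: it only attaches the
  // parsed Options to the dataset, so the accessors below simply forward to
  // the input dataset.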
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
AttrValue serialized_options_attr;
b->BuildAttrValue(serialized_options_, &serialized_options_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_graph_node},
{std::make_pair(kSerializedOptions, serialized_options_attr)}, output));
return absl::OkStatus();
}
private:
const DatasetBase* input_;
const tstring serialized_options_;
absl::Status random_indexing_compatible_;
};
void OptionsDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
*output = new Dataset(ctx, input, serialized_options_);
}
OptionsDatasetOp::OptionsDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSerializedOptions, &serialized_options_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptionsDataset").Device(DEVICE_CPU).Priority(2),
OptionsDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptionsDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
OptionsDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
)proto";
class OptionsDatasetOpTest : public DatasetOpsTestBase {};
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptionsDatasetParams1() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(10, 0, -3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_1");
}
OptionsDatasetParams OptionsDatasetParams2() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 5, 1),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_2");
}
std::vector<GetNextTestCase<OptionsDatasetParams>> GetNextTestCases() {
return {{OptionsDatasetParams0(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptionsDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})},
{OptionsDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
ITERATOR_GET_NEXT_TEST_P(OptionsDatasetOpTest, OptionsDatasetParams,
GetNextTestCases())
TEST_F(OptionsDatasetOpTest, DatasetOptions) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
Options expected_options;
protobuf::TextFormat::ParseFromString(kOptions, &expected_options);
TF_ASSERT_OK(CheckDatasetOptions(expected_options));
}
TEST_F(OptionsDatasetOpTest, DatasetNodeName) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(OptionsDatasetOpTest, DatasetTypeString) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(OptionsDatasetOp::kDatasetType)));
}
TEST_F(OptionsDatasetOpTest, DatasetoutputDTypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, DatasetoutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, DatasetCardinality) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(4));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, IteratorPrefix) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
RangeDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/options_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/options_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fed1475-0ff7-47b4-b9ac-9fe5bccb633b | cpp | tensorflow/tensorflow | iterator_ops | tensorflow/core/kernels/data/iterator_ops.cc | tensorflow/core/kernels/data/iterator_ops_test.cc | #include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/activity_watcher/activity_utils.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/finalization_utils.h"
#include "tensorflow/core/data/metric_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/tf_data_memory_logger.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/model.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/kernels/data/optional_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
const char kAnonymousIterator[] = "AnonymousIterator";
const char kAnonymousIteratorV2[] = "AnonymousIteratorV2";
const char kAnonymousIteratorV3[] = "AnonymousIteratorV3";
const char kIteratorVariantTypeName[] = "tensorflow::Iterator";
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
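// Symbolic checkpointing is treated as enabled only when the Options proto
// explicitly sets symbolic_checkpoint = true; an unset field falls back to
// regular iterator serialization.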
bool SymbolicCheckpointEnabled(const Options& options) {
return options.optional_symbolic_checkpoint_case() ==
Options::kSymbolicCheckpoint &&
options.symbolic_checkpoint();
}
}
constexpr const char* const
SerializeIteratorOp::kExternalStatePolicy;
IteratorResource::IteratorResource(
Env* env, const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<DeviceMgr> device_mgr,
std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* flr)
: metrics_collector_(flr->device()->device_type(), *env),
unbounded_thread_pool_(env, "tf_data_iterator_resource"),
env_(*env),
device_mgr_(std::move(device_mgr)),
iterator_state_(std::make_shared<State>(std::move(flib_def),
std::move(pflr), flr,
nullptr)),
output_dtypes_(output_dtypes),
output_shapes_(output_shapes) {
VLOG(2) << "creating iterator resource";
}
IteratorResource::~IteratorResource() {
TfDatazMetricsRegistry::Deregister(tf_dataz_metrics_collector_);
VLOG(2) << "destroying iterator resource";
}
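// GetNext snapshots the current iterator state under a shared lock, builds an
// IteratorContext wired to the resource's cancellation manager and unbounded
// thread pool, registers a cancellation callback against the op's context,
// records latency/throughput metrics around the underlying GetNext call, and
// finally merges any symbolic checkpoint state back into the resource.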
Status IteratorResource::GetNext(OpKernelContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"GetNext() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
auto* dataset = captured_state->dataset();
IteratorContext::Params params(ctx);
params.cancellation_manager = captured_state->cancellation_manager();
params.flr = captured_state->flr();
params.function_handle_cache = captured_state->function_handle_cache();
params.resource_mgr = captured_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = captured_state->id_registry();
params.warm_start = dataset->options().warm_start();
params.model = captured_state->model();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(std::move(params));
const absl::Time start_time = metrics_collector_.RecordStart();
auto status = iterator->GetNext(&iter_ctx, out_tensors, end_of_sequence);
metrics_collector_.RecordStop(start_time, *out_tensors);
const int64_t get_next_latency_micros =
env_.NowMicros() - absl::ToUnixMicros(start_time);
tf_dataz_metrics_collector_->RecordGetNextLatency(get_next_latency_micros);
captured_state->MergeCheckpoint(iter_ctx.checkpoint());
return status;
}
absl::Status IteratorResource::GetModelProto(std::string& model_proto) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return absl::FailedPreconditionError(
"GetModelProto() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
model::ModelProto proto;
if (auto model = captured_state->model(); model) {
TF_RETURN_IF_ERROR(model->ToProto(&proto));
} else {
return absl::NotFoundError(
"Cannot find this iterator's analytical model. Did you disable "
"autotune for the dataset used to create this iterator? See more "
"information at "
"https:
"AutotuneOptions .");
}
model_proto = proto.SerializeAsString();
return absl::OkStatus();
}
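// Save either writes out the accumulated in-memory checkpoint when symbolic
// checkpointing is enabled for the dataset, or asks the iterator to serialize
// itself under the given external state policy.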
Status IteratorResource::Save(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorStateWriter* writer) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Save() failed because the iterator has not been initialized. Ensure "
"that you have run the initializer operation for this iterator before "
"saving it.");
}
auto* dataset = captured_state->dataset();
if (SymbolicCheckpointEnabled(dataset->options())) {
const auto& checkpoint = captured_state->checkpoint();
if (!checkpoint.GetStatus().ok()) {
LOG(WARNING) << "Symbolic checkpointing failed: "
<< checkpoint.GetStatus();
return checkpoint.GetStatus();
}
LOG(INFO) << "Saving symbolic checkpoint";
TF_RETURN_IF_ERROR(checkpoint.Save(writer));
return absl::OkStatus();
}
SerializationContext::Params params(ctx);
params.external_state_policy = external_state_policy;
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
SerializationContext serialization_ctx(params);
return iterator->Save(&serialization_ctx, writer);
}
Status IteratorResource::Restore(OpKernelContext* ctx,
IteratorStateReader* reader) {
const DatasetBase* dataset;
std::shared_ptr<State> new_state;
const DatasetBase* input_dataset;
{
tf_shared_lock l(mu_);
auto iterator = iterator_state_->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Restore() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this "
"iterator before restoring it.");
}
dataset = iterator->dataset();
dataset->Ref();
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
nullptr);
input_dataset = iterator_state_->dataset();
iterator_state_->cancellation_manager()->StartCancel();
}
core::ScopedUnref scoped_unref(dataset);
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint =
SymbolicCheckpointEnabled(input_dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator_base;
TF_RETURN_IF_ERROR(dataset->MakeIteratorFromCheckpoint(
&iter_ctx, "Iterator", reader, &iterator_base));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator_base),
input_dataset);
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
return absl::OkStatus();
}
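// SetIteratorFromDataset builds a fresh iterator state for `dataset`. On CPU
// the dataset is first finalized via GetFinalizedDataset before the iterator
// is created; the iterator's dtypes and shapes are then verified against the
// resource's declared signature before the new state is swapped in.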
Status IteratorResource::SetIteratorFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset) {
std::shared_ptr<State> new_state;
{
tf_shared_lock l(mu_);
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
nullptr);
}
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator;
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset;
TF_ASSIGN_OR_RETURN(finalized_dataset, GetFinalizedDataset(ctx, dataset));
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(&iter_ctx,
nullptr,
"Iterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx,
nullptr, "Iterator",
&iterator));
}
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, iterator->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, iterator->output_shapes()));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator), dataset);
new_state->SetModel(iter_ctx.model());
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
tf_dataz_metrics_collector_ = std::make_shared<TfDatazMetricsCollector>(
env_, iterator_state_->iterator(), iterator_state_->model());
EnsureIteratorMemoryLoggerStarted();
TfDatazMetricsRegistry::Register(tf_dataz_metrics_collector_);
return absl::OkStatus();
}
void IteratorResource::State::DowncastAndSetIteratorAndDataset(
std::unique_ptr<IteratorBase> it, const DatasetBase* dataset) {
iterator_.reset(static_cast<DatasetBaseIterator*>(it.release()));
if (dataset) {
dataset->Ref();
dataset_.reset(const_cast<DatasetBase*>(dataset));
}
}
void IteratorResource::State::MergeCheckpoint(MemoryCheckpoint* other) {
if (SymbolicCheckpointEnabled(dataset_->options())) {
checkpoint_.Merge(other);
}
}
void IteratorResource::State::SetModel(std::shared_ptr<model::Model> model) {
model_ = model;
}
namespace {
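// IteratorVariantSerializer bridges iterator checkpoints and variant tensors:
// InitializeFromIterator captures the resource state as IteratorStateVariant
// values for SerializeIterator, and InitFromTensor rebuilds a reader that
// DeserializeIterator uses to restore a resource.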
class IteratorVariantSerializer {
public:
IteratorVariantSerializer() = default;
Status InitializeFromIterator(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorResource* iterator_resource) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(
iterator_resource->Save(ctx, external_state_policy, &writer));
std::vector<std::unique_ptr<VariantTensorData>> data;
writer.ReleaseData(&data);
variants_.clear();
variants_.reserve(data.size());
for (auto& it : data) {
IteratorStateVariant v;
TF_RETURN_IF_ERROR(v.InitializeFromVariantData(std::move(it)));
variants_.push_back(v);
}
num_tensors_ = variants_.size();
can_serialize_ = true;
return absl::OkStatus();
}
Status InitFromTensor(const Tensor* serialized_t) {
int64_t num_tensors = serialized_t->dim_size(0);
auto serialized_vec = serialized_t->vec<Variant>();
std::vector<const VariantTensorData*> data;
data.reserve(num_tensors);
for (int i = 0; i < num_tensors; ++i) {
auto* w = serialized_vec(i).get<IteratorStateVariant>();
if (!w) {
return errors::Internal(
"Cannot initialize an iterator from tensor ",
serialized_vec(i).DebugString(),
". Expected a variant tensor of type IteratorStateVariant");
}
data.push_back(w->GetData());
}
reader_ = std::make_unique<VariantTensorDataReader>(data);
num_tensors_ = data.size();
return absl::OkStatus();
}
int64_t NumTensors() { return num_tensors_; }
Status Serialize(Tensor* serialized) {
if (!can_serialize_) {
return errors::InvalidArgument(
"Please call InitializeFromIterator before calling Serialize.");
}
int64_t size = variants_.size();
for (int64_t i = 0; i < size; ++i) {
if (variants_[i].GetData() == nullptr) {
return errors::Internal(
"Cannot serialize an empty IteratorStateVariant");
}
serialized->vec<Variant>()(i) = variants_[i];
}
return absl::OkStatus();
}
IteratorStateReader* GetReader() { return reader_.get(); }
private:
bool can_serialize_ = false;
int64_t num_tensors_;
std::vector<IteratorStateVariant> variants_;
std::unique_ptr<IteratorStateReader> reader_;
};
}
IteratorHandleOp::IteratorHandleOp(OpKernelConstruction* ctx)
: OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &name_));
}
IteratorHandleOp::~IteratorHandleOp() {
if (resource_ != nullptr) {
resource_->Unref();
if (cinfo_.resource_is_private_to_kernel()) {
if (!cinfo_.resource_manager()
->template Delete<IteratorResource>(cinfo_.container(),
cinfo_.name())
.ok()) {
}
}
}
}
void IteratorHandleOp::Compute(OpKernelContext* context)
TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(mu_);
if (resource_ == nullptr) {
FunctionLibraryRuntime* flr;
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
if (!name_.empty()) {
flr = CreatePrivateFLR(context, &device_mgr, &flib_def, &pflr);
} else {
OP_REQUIRES_OK(context, context->function_library()->Clone(
&flib_def, &pflr, &flr, true));
}
ResourceMgr* mgr = context->resource_manager();
OP_REQUIRES_OK(context, cinfo_.Init(mgr, def()));
IteratorResource* resource;
OP_REQUIRES_OK(
context,
mgr->LookupOrCreate<IteratorResource>(
cinfo_.container(), cinfo_.name(), &resource,
[context, flr, &device_mgr, &flib_def, &pflr,
this](IteratorResource** ret) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
*ret = new IteratorResource(
context->env(), output_dtypes_, output_shapes_,
std::move(device_mgr), std::move(flib_def), std::move(pflr),
flr);
return absl::OkStatus();
}));
Status s = VerifyResource(resource);
if (TF_PREDICT_FALSE(!s.ok())) {
resource->Unref();
context->SetStatus(s);
return;
}
resource_ = resource;
}
}
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, cinfo_.container(), cinfo_.name(),
TypeIndex::Make<IteratorResource>()));
}
Status IteratorHandleOp::VerifyResource(IteratorResource* resource) {
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, resource->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, resource->output_shapes()));
return absl::OkStatus();
}
FunctionLibraryRuntime* IteratorHandleOp::CreatePrivateFLR(
OpKernelContext* ctx, std::unique_ptr<DeviceMgr>* device_mgr,
std::unique_ptr<FunctionLibraryDefinition>* flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* pflr) {
*device_mgr =
std::make_unique<StaticDeviceMgr>(RenamedDevice::NewRenamedDevice(
ctx->device()->name(), down_cast<Device*>(ctx->device()),
false , false ));
*flib_def = std::make_unique<FunctionLibraryDefinition>(
*ctx->function_library()->GetFunctionLibraryDefinition());
const auto* config = ctx->function_library()->config_proto();
*pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr->get(), ctx->env(),
config, graph_def_version_, flib_def->get(),
config->graph_options().optimizer_options());
return (*pflr)->GetFLR(ctx->device()->name());
}
AnonymousIteratorHandleOp::AnonymousIteratorHandleOp(
OpKernelConstruction* context)
: AnonymousResourceOp<IteratorResource>(
context,
context->def().op() == kAnonymousIteratorV2 ||
context->def().op() == kAnonymousIteratorV3,
context->def().op() == kAnonymousIteratorV2),
graph_def_version_(context->graph_def_version()) {
OP_REQUIRES_OK(context, context->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(context, context->GetAttr(kOutputShapes, &output_shapes_));
}
string AnonymousIteratorHandleOp::name() { return kAnonymousIterator; }
Status AnonymousIteratorHandleOp::CreateResource(
OpKernelContext* ctx, std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* lib, IteratorResource** resource) {
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
*resource = new IteratorResource(ctx->env(), output_dtypes_, output_shapes_,
std::move(device_mgr), std::move(flib_def),
std::move(pflr), lib);
return absl::OkStatus();
}
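// HybridAsyncOpKernel runs the same DoCompute body either on a named
// background worker (ComputeAsync) or inline (Compute); MakeIteratorOp and
// DeleteIteratorOp below implement only DoCompute.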
HybridAsyncOpKernel::HybridAsyncOpKernel(OpKernelConstruction* ctx,
const char* background_worker_name)
: AsyncOpKernel(ctx),
background_worker_(ctx->env(), background_worker_name) {}
void HybridAsyncOpKernel::ComputeAsync(OpKernelContext* ctx,
DoneCallback done) {
background_worker_.Schedule([this, ctx, done = std::move(done)]() {
ctx->SetStatus(DoCompute(ctx));
done();
});
}
void HybridAsyncOpKernel::Compute(OpKernelContext* ctx) {
ctx->SetStatus(DoCompute(ctx));
}
Status MakeIteratorOp::DoCompute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
IteratorResource* iterator_resource;
TF_RETURN_IF_ERROR(
LookupResource(ctx, HandleFromInput(ctx, 1), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
return iterator_resource->SetIteratorFromDataset(ctx, dataset);
}
Status DeleteIteratorOp::DoCompute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
const ResourceHandle& handle = ctx->input(0).flat<ResourceHandle>()(0);
return DeleteResource(ctx, handle);
}
namespace {
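// ToSingleElementOp materializes exactly one element: it builds a throwaway
// iterator, fails if the dataset is empty or yields more than one element,
// and otherwise forwards the element's components as the op's outputs.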
class ToSingleElementOp : public AsyncOpKernel {
public:
explicit ToSingleElementOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
metrics_collector_(ctx->device()->attributes().device_type(),
*ctx->env()),
unbounded_threadpool_(ctx->env(), "tf_data_to_single_element") {
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
unbounded_threadpool_.Schedule([this, ctx, done = std::move(done)]() {
ctx->SetStatus(DoCompute(ctx));
done();
});
}
void Compute(OpKernelContext* ctx) override {
ctx->SetStatus(DoCompute(ctx));
}
private:
Status DoCompute(OpKernelContext* ctx) {
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode("ToSingleElementOp::DoCompute",
{{"id", ctx->step_id()}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("ToSingleElementOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
IteratorContext::Params params(ctx);
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(dataset->MakeIterator(
&iter_ctx, nullptr, "SingleElementIterator", &iterator));
std::vector<Tensor> components;
components.reserve(dataset->output_dtypes().size());
bool end_of_sequence = false;
const absl::Time start_time = metrics_collector_.RecordStart();
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &components, &end_of_sequence));
metrics_collector_.RecordStop(start_time, components);
if (end_of_sequence) {
return errors::InvalidArgument("Dataset was empty.");
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, components));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, components));
for (int i = 0; i < components.size(); ++i) {
ctx->set_output(i, components[i]);
}
components.clear();
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &components, &end_of_sequence));
if (!end_of_sequence) {
return errors::InvalidArgument("Dataset had more than one element.");
}
return absl::OkStatus();
}
IteratorMetricsCollector metrics_collector_;
UnboundedThreadPool unbounded_threadpool_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
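// OneShotIteratorOp lazily builds its dataset by running the `dataset_factory`
// function on a background worker the first time the op executes; concurrent
// calls that arrive during initialization are queued and receive the same
// resource handle once initialization finishes.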
class OneShotIteratorOp : public AsyncOpKernel {
public:
explicit OneShotIteratorOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
background_worker_(ctx->env(), "tf_data_one_shot_iterator"),
graph_def_version_(ctx->graph_def_version())
{
string shared_name;
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &shared_name));
OP_REQUIRES(ctx, shared_name.empty(),
errors::InvalidArgument("OneShotIteratorOp does not currently "
"support the 'shared_name' attr."));
OP_REQUIRES_OK(ctx,
ctx->GetAttr("dataset_factory", &dataset_factory_func_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
~OneShotIteratorOp() override {
if (iterator_resource_ != nullptr) {
iterator_resource_->Unref();
if (!cinfo_.resource_manager()
->Delete<IteratorResource>(cinfo_.container(), cinfo_.name())
.ok()) {
}
}
}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
{
mutex_lock l(mu_);
if (iterator_resource_ == nullptr && initialization_status_.ok()) {
if (!initialization_started_) {
background_worker_.Schedule([this, ctx, done]() { Init(ctx, done); });
initialization_started_ = true;
} else {
done_callbacks_.emplace_back(ctx, std::move(done));
}
return;
}
}
ProduceOutput(ctx, done);
}
private:
void Init(OpKernelContext* ctx, const DoneCallback& done) {
IteratorResource* iterator = nullptr;
ContainerInfo cinfo;
Status s = TryInit(ctx, &iterator, &cinfo);
std::vector<std::pair<OpKernelContext*, DoneCallback>> callbacks_to_run;
{
mutex_lock l(mu_);
if (s.ok()) {
iterator_resource_ = iterator;
cinfo_ = cinfo;
}
initialization_status_ = s;
std::swap(done_callbacks_, callbacks_to_run);
}
for (auto&& ctx_done : callbacks_to_run) {
ProduceOutput(ctx_done.first, ctx_done.second);
}
ProduceOutput(ctx, done);
}
Status TryInit(OpKernelContext* ctx, IteratorResource** iterator,
ContainerInfo* cinfo) {
TF_RETURN_IF_ERROR(cinfo->Init(ctx->resource_manager(), def()));
FunctionLibraryRuntime* flr;
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
TF_RETURN_IF_ERROR(
ctx->function_library()->Clone(&flib_def, &pflr, &flr, true));
TF_RETURN_IF_ERROR(
ctx->resource_manager()->LookupOrCreate<IteratorResource>(
cinfo->container(), cinfo->name(), iterator,
[ctx, flr, this, &flib_def, &pflr](IteratorResource** ret)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
*ret = new IteratorResource(
ctx->env(), output_dtypes_, output_shapes_,
nullptr, std::move(flib_def),
std::move(pflr), flr);
return absl::OkStatus();
}));
core::ScopedUnref unref_iterator(*iterator);
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, (*iterator)->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, (*iterator)->output_shapes()));
FunctionLibraryRuntime::Handle f_handle;
TF_RETURN_IF_ERROR(ctx->function_library()->Instantiate(
dataset_factory_func_.name(), AttrSlice(&dataset_factory_func_.attr()),
&f_handle));
FunctionLibraryRuntime::Options opts;
opts.cancellation_manager = ctx->cancellation_manager();
ScopedStepContainer step_container(opts.step_id, [ctx](const string& name) {
ctx->resource_manager()->Cleanup(name).IgnoreError();
});
opts.step_container = &step_container;
opts.runner = ctx->runner();
opts.run_all_kernels_inline = ctx->run_all_kernels_inline();
std::vector<Tensor> return_values;
TF_RETURN_IF_ERROR(ctx->function_library()->RunSync(
std::move(opts), f_handle, {}, &return_values));
if (return_values.size() != 1 || return_values[0].dtype() != DT_VARIANT ||
!TensorShapeUtils::IsScalar(return_values[0].shape())) {
return errors::InvalidArgument(
"The `dataset_factory` function must return "
"a single scalar of dtype DT_VARIANT.");
}
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(return_values[0], &dataset));
TF_RETURN_IF_ERROR((*iterator)->SetIteratorFromDataset(ctx, dataset));
(*iterator)->Ref();
return absl::OkStatus();
}
void ProduceOutput(OpKernelContext* ctx, const DoneCallback& done) {
Tensor* handle;
OP_REQUIRES_OK_ASYNC(ctx, ctx->allocate_output(0, TensorShape({}), &handle),
done);
Status s;
{
mutex_lock l(mu_);
s = initialization_status_;
if (s.ok()) {
handle->scalar<ResourceHandle>()() =
MakeResourceHandle<IteratorResource>(ctx, cinfo_.container(),
cinfo_.name());
}
}
OP_REQUIRES_OK_ASYNC(ctx, s, done);
done();
}
NameAttrList dataset_factory_func_;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
BackgroundWorker background_worker_;
mutex mu_;
ContainerInfo cinfo_ TF_GUARDED_BY(mu_);
IteratorResource* iterator_resource_ TF_GUARDED_BY(mu_) = nullptr;
bool initialization_started_ TF_GUARDED_BY(mu_) = false;
Status initialization_status_ TF_GUARDED_BY(mu_);
std::vector<std::pair<OpKernelContext*, DoneCallback>> done_callbacks_
TF_GUARDED_BY(mu_);
const int graph_def_version_;
};
}
AsyncOpKernel* IteratorGetNextOp::AsAsync() {
return type_string() == "IteratorGetNextSync" ? nullptr : this;
}
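// IteratorGetNextSync opts out of the asynchronous path above by returning
// nullptr from AsAsync. The helper below attaches the total byte size of the
// fetched element to the active TraceMe for profiling.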
void RecordElementSize(const std::vector<Tensor> element,
tsl::profiler::TraceMe* traceme) {
traceme->AppendMetadata([&]() {
int64_t element_size = 0;
for (const auto& component : element) {
element_size += component.TotalBytes();
}
return tsl::profiler::TraceMeEncode({{"element_size", element_size}});
});
}
Status IteratorGetNextOp::DoCompute(OpKernelContext* ctx) {
VLOG(3) << "IteratorGetNextOp enter. iter_id=" << ctx->frame_iter().iter_id;
auto cleanup = gtl::MakeCleanup([ctx] {
VLOG(3) << "IteratorGetNextOp exit. iter_id=" << ctx->frame_iter().iter_id;
});
activity_watcher::ActivityScope activity_scope([ctx = ctx]() {
return activity_watcher::ActivityFromContext(
ctx, "IteratorGetNextOp::DoCompute",
activity_watcher::ActivityCategory::kDatasetOp);
});
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"IteratorGetNextOp::DoCompute",
{{"id", ctx->step_id()}, {"iter_num", ctx->frame_iter().iter_id}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("IteratorGetNextOp");
IteratorResource* iterator;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::vector<Tensor> components;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator->GetNext(ctx, &components, &end_of_sequence));
if (end_of_sequence) {
return errors::OutOfRange("End of sequence");
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, components));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, components));
RecordElementSize(components, &traceme);
for (int i = 0; i < components.size(); ++i) {
ctx->set_output(i, components[i]);
}
return absl::OkStatus();
}
Status IteratorGetModelProtoOp::DoCompute(OpKernelContext* ctx) {
IteratorResource* iterator = nullptr;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::string model_proto;
TF_RETURN_IF_ERROR(iterator->GetModelProto(model_proto));
Tensor* model_proto_result;
TF_RETURN_IF_ERROR(
ctx->allocate_output(0, TensorShape({}), &model_proto_result));
model_proto_result->scalar<tstring>()() = model_proto;
return absl::OkStatus();
}
Status IteratorGetNextAsOptionalOp::DoCompute(OpKernelContext* ctx) {
VLOG(3) << "IteratorGetNextAsOptionalOp enter. iter_id="
<< ctx->frame_iter().iter_id;
auto cleanup = gtl::MakeCleanup([ctx] {
VLOG(3) << "IteratorGetNextAsOptionalOp exit. iter_id="
<< ctx->frame_iter().iter_id;
});
activity_watcher::ActivityScope activity_scope([ctx = ctx]() {
return activity_watcher::ActivityFromContext(
ctx, "IteratorGetNextAsOptionalOp::DoCompute",
activity_watcher::ActivityCategory::kDatasetOp);
});
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"IteratorGetNextAsOptionalOp::DoCompute",
{{"id", ctx->step_id()}, {"iter_num", ctx->frame_iter().iter_id}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("IteratorGetNextAsOptionalOp");
IteratorResource* iterator;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::vector<Tensor> components;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator->GetNext(ctx, &components, &end_of_sequence));
if (end_of_sequence) {
return WriteOptionalNoneToOutput(ctx, 0);
} else {
RecordElementSize(components, &traceme);
for (int i = 0; i < components.size(); ++i) {
if (components[i].dtype() != output_types_[i]) {
return errors::InvalidArgument(
"The given optional does not match the expected type for "
"component ",
i, ". Expected: ", DataTypeString(output_types_[i]),
". Actual: ", DataTypeString(components[i].dtype()), ".");
}
if (!output_shapes_[i].IsCompatibleWith(components[i].shape())) {
return errors::InvalidArgument(
"The given optional does not match the expected shape "
"for component ",
i, ". Expected: ", output_shapes_[i].DebugString(),
". Actual: ", components[i].shape().DebugString(), ".");
}
}
return WriteOptionalWithValueToOutput(ctx, 0, std::move(components));
}
}
void IteratorToStringHandleOp::Compute(OpKernelContext* ctx) {
const Tensor& resource_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
errors::InvalidArgument("resource_handle must be a scalar"));
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
iterator_resource->Unref();
Tensor* string_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<tstring>()() =
resource_handle_t.scalar<ResourceHandle>()().SerializeAsString();
}
IteratorFromStringHandleOp::IteratorFromStringHandleOp(
OpKernelConstruction* ctx)
: OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES(
ctx,
output_dtypes_.empty() || output_shapes_.empty() ||
output_dtypes_.size() == output_shapes_.size(),
errors::InvalidArgument("If both 'output_types' and 'output_shapes' "
"are set, they must have the same length."));
}
void IteratorFromStringHandleOp::Compute(OpKernelContext* ctx) {
const Tensor& string_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(string_handle_t.shape()),
errors::InvalidArgument("string_handle must be a scalar"));
ResourceHandle resource_handle;
OP_REQUIRES(
ctx, resource_handle.ParseFromString(string_handle_t.scalar<tstring>()()),
errors::InvalidArgument(
"Could not parse string_handle as a valid ResourceHandle"));
OP_REQUIRES(
ctx, resource_handle.device() == ctx->device()->attributes().name(),
errors::InvalidArgument("Attempted create an iterator on device \"",
ctx->device()->attributes().name(),
"\" from handle defined on device \"",
resource_handle.device(), "\""));
IteratorResource* iterator_resource;
OP_REQUIRES_OK(ctx, LookupResource(ctx, resource_handle, &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
if (!output_dtypes_.empty()) {
OP_REQUIRES_OK(ctx, VerifyTypesMatch(output_dtypes_,
iterator_resource->output_dtypes()));
}
if (!output_shapes_.empty()) {
OP_REQUIRES_OK(ctx,
VerifyShapesCompatible(output_shapes_,
iterator_resource->output_shapes()));
}
Tensor* resource_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &resource_handle_t));
resource_handle_t->scalar<ResourceHandle>()() = resource_handle;
}
SerializeIteratorOp::SerializeIteratorOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {
if (ctx->HasAttr(kExternalStatePolicy)) {
int64_t external_state_policy;
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kExternalStatePolicy, &external_state_policy));
external_state_policy_ = ExternalStatePolicy(external_state_policy);
}
}
void SerializeIteratorOp::Compute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
const Tensor& resource_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
errors::InvalidArgument("resource_handle must be a scalar"));
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
IteratorVariantSerializer serializer;
OP_REQUIRES_OK(ctx, serializer.InitializeFromIterator(
ctx, external_state_policy_, iterator_resource));
Tensor* serialized_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({serializer.NumTensors()}),
&serialized_t));
OP_REQUIRES_OK(ctx, serializer.Serialize(serialized_t));
}
void DeserializeIteratorOp::Compute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
const Tensor* serialized_t;
OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized_t));
IteratorVariantSerializer serializer;
OP_REQUIRES_OK(ctx, serializer.InitFromTensor(serialized_t));
Status s = iterator_resource->Restore(ctx, serializer.GetReader());
if (!s.ok()) {
OP_REQUIRES_OK(
ctx,
errors::CreateWithUpdatedMessage(
s, absl::StrCat(
"Failed to restore dataset iterator from checkpoint: ",
s.message(),
". Make sure the dataset definition has not changed between "
"the process that saved the checkpoint and the process that "
"is restoring it.")));
}
}
namespace {
REGISTER_KERNEL_BUILDER(Name("Iterator").Device(DEVICE_CPU), IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_CPU).Priority(2),
IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_GPU).Priority(1),
IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("MakeIterator").Device(DEVICE_CPU).Priority(2),
MakeIteratorOp);
REGISTER_KERNEL_BUILDER(
Name("MakeIterator").Device(DEVICE_GPU).Priority(1).HostMemory("dataset"),
MakeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeleteIterator").Device(DEVICE_CPU).Priority(2),
DeleteIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeleteIterator").Device(DEVICE_GPU).Priority(1),
DeleteIteratorOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIterator").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIterator").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV2").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV2").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV3").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV3").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("DatasetToSingleElement").Device(DEVICE_CPU),
ToSingleElementOp);
REGISTER_KERNEL_BUILDER(Name("OneShotIterator").Device(DEVICE_CPU),
OneShotIteratorOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_CPU).Priority(2),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_GPU).Priority(1),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextSync").Device(DEVICE_CPU).Priority(2),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextSync").Device(DEVICE_GPU).Priority(1),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextAsOptional").Device(DEVICE_CPU).Priority(2),
IteratorGetNextAsOptionalOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextAsOptional").Device(DEVICE_GPU).Priority(1),
IteratorGetNextAsOptionalOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorToStringHandle").Device(DEVICE_CPU).Priority(2),
IteratorToStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorToStringHandle")
.Device(DEVICE_GPU)
.HostMemory("string_handle")
.Priority(1),
IteratorToStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandle").Device(DEVICE_CPU),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorFromStringHandleV2").Device(DEVICE_CPU).Priority(2),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandleV2")
.Device(DEVICE_GPU)
.HostMemory("string_handle")
.Priority(1),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("SerializeIterator").Device(DEVICE_CPU),
SerializeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeserializeIterator").Device(DEVICE_CPU),
DeserializeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetModelProto").Device(DEVICE_CPU),
IteratorGetModelProtoOp);
}
}
} | #include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::monitoring::testing::Histogram;
class IteratorOpsTest : public DatasetOpsTestBase {
public:
absl::StatusOr<core::RefCountPtr<IteratorResource>> GetIteratorResource() {
FunctionLibraryRuntime* flr = nullptr;
std::unique_ptr<DeviceMgr> device_mgr;
std::unique_ptr<FunctionLibraryDefinition> flib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> plfr;
TF_RETURN_IF_ERROR(dataset_ctx_->function_library()->Clone(
&flib_def, &plfr, &flr, true));
core::RefCountPtr<IteratorResource> iter_resource(
new IteratorResource(dataset_ctx_->env(), dataset_->output_dtypes(),
dataset_->output_shapes(), std::move(device_mgr),
std::move(flib_def), std::move(plfr), flr));
TF_RETURN_IF_ERROR(
iter_resource->SetIteratorFromDataset(dataset_ctx_.get(), dataset_));
return iter_resource;
}
absl::StatusOr<std::vector<std::vector<Tensor>>> GetIteratorOutput(
IteratorResource& iterator) {
std::vector<std::vector<Tensor>> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
std::vector<Tensor> tensors;
TF_RETURN_IF_ERROR(
iterator.GetNext(dataset_ctx_.get(), &tensors, &end_of_sequence));
if (end_of_sequence) {
break;
}
output.push_back(std::move(tensors));
}
return output;
}
};
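// CollectMetrics drives a small RangeDataset through an IteratorResource and
// checks that the tf.data telemetry counters (GetNext latency, iterator gap,
// bytes fetched, iterator lifetime, iterator busy time) move from zero to
// positive deltas after iteration.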
TEST_F(IteratorOpsTest, CollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<Histogram> iterator_gap("/tensorflow/data/iterator_gap");
CellReader<int64_t> throughput("/tensorflow/data/bytes_fetched");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_FLOAT_EQ(iterator_gap.Delta().num(), 0.0);
EXPECT_EQ(throughput.Delta(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0.0);
EXPECT_EQ(iterator_busy.Delta(), 0.0);
RangeDatasetParams dataset_params = RangeDatasetParams(0, 10, 3);
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK_AND_ASSIGN(core::RefCountPtr<IteratorResource> iter_resource,
GetIteratorResource());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::vector<Tensor>> output,
GetIteratorOutput(*iter_resource));
EXPECT_EQ(output.size(), 4);
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 5.0);
EXPECT_GT(latency_histogram.sum(), 0.0);
Histogram iterator_gap_histogram = iterator_gap.Delta();
EXPECT_FLOAT_EQ(iterator_gap_histogram.num(), 5.0);
EXPECT_GT(iterator_gap_histogram.sum(), 0.0);
EXPECT_GT(throughput.Delta(), 0);
EXPECT_GT(iterator_lifetime.Delta(), 0);
EXPECT_GT(iterator_busy.Delta(), 0.0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/iterator_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/iterator_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37f2e820-6f6b-4f58-9937-76f4d6f94147 | cpp | tensorflow/tensorflow | padded_batch_dataset_op | tensorflow/core/kernels/data/padded_batch_dataset_op.cc | tensorflow/core/kernels/data/padded_batch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/padded_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const PaddedBatchDatasetOp::kDatasetType;
constexpr const char* const PaddedBatchDatasetOp::kInputDataset;
constexpr const char* const PaddedBatchDatasetOp::kBatchSize;
constexpr const char* const PaddedBatchDatasetOp::kPaddedShapes;
constexpr const char* const PaddedBatchDatasetOp::kPaddingValues;
constexpr const char* const PaddedBatchDatasetOp::kDropRemainder;
constexpr const char* const PaddedBatchDatasetOp::kParallelCopy;
constexpr const char* const PaddedBatchDatasetOp::kToutputTypes;
constexpr const char* const PaddedBatchDatasetOp::kOutputShapes;
constexpr const char* const PaddedBatchDatasetOp::kNumPaddedShapes;
constexpr char kExhausted[] = "exhausted";
class PaddedBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder,
bool parallel_copy, std::vector<PartialTensorShape> padded_shapes,
std::vector<Tensor> padding_values, const DatasetBase* input,
int op_version)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
padded_shapes_(std::move(padded_shapes)),
padding_values_(std::move(padding_values)),
input_(input),
op_version_(op_version),
traceme_metadata_(
{{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
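    // The batch dimension is statically known only when every batch is
    // guaranteed to be full, i.e. the remainder is dropped or the input is
    // infinite; otherwise the final batch may be smaller, so it is left
    // unknown (-1).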
for (size_t i = 0; i < input_shapes.size(); ++i) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.push_back(
PartialTensorShape({batch_size_}).Concatenate(padded_shapes_[i]));
} else {
output_shapes_.push_back(
PartialTensorShape({-1}).Concatenate(padded_shapes_[i]));
}
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
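  // A finite input of n elements yields ceil(n / batch_size) batches, or
  // floor(n / batch_size) when the remainder is dropped; infinite and unknown
  // cardinalities are passed through unchanged.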
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
std::vector<Node*> padded_shapes;
padded_shapes.reserve(padded_shapes_.size());
for (int i = 0; i < padded_shapes_.size(); i++) {
Node* node;
Tensor t(DT_INT64, TensorShape({padded_shapes_[i].dims()}));
for (int j = 0; j < padded_shapes_[i].dims(); j++) {
t.vec<int64_t>()(j) = padded_shapes_[i].dim_size(j);
}
TF_RETURN_IF_ERROR(b->AddTensor(t, &node));
padded_shapes.emplace_back(node);
}
std::vector<Node*> padding_values;
padding_values.reserve(padding_values_.size());
for (const Tensor& t : padding_values_) {
Node* node;
TF_RETURN_IF_ERROR(b->AddTensor(t, &node));
padding_values.emplace_back(node);
}
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
AttrValue parallel_copy;
b->BuildAttrValue(parallel_copy_, ¶llel_copy);
AttrValue output_types;
b->BuildAttrValue(output_dtypes(), &output_types);
AttrValue N;
b->BuildAttrValue<int64_t>(padded_shapes_.size(), &N);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_graph_node}, {1, batch_size}, {4, drop_remainder}},
{{2, padded_shapes}, {3, padding_values}},
{{kParallelCopy, parallel_copy},
{kToutputTypes, output_types},
{kNumPaddedShapes, N}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
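    // Gathers up to batch_size elements from the input iterator while holding
    // the lock, then pads and copies them into the output batch outside of it.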
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
} else {
*end_of_sequence = false;
batch_elements.reserve(dataset()->batch_size_);
for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence;
++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
end_of_sequence));
if (!*end_of_sequence) {
batch_elements.push_back(std::move(batch_element_tuple));
}
}
if (*end_of_sequence) {
input_impl_.reset();
}
}
}
if (batch_elements.empty()) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
if (dataset()->drop_remainder_ &&
batch_elements.size() < dataset()->batch_size_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CopyBatch(ctx, batch_elements, out_tensors));
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_exhausted;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
if (static_cast<bool>(input_exhausted)) {
input_impl_.reset();
} else {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
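    // Copies the gathered elements into one padded tensor per component:
    // validates ranks, computes the (possibly dynamic) padded dimensions,
    // fills the batch with the padding value, and then copies every element
    // into its slice.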
Status CopyBatch(IteratorContext* ctx,
const std::vector<std::vector<Tensor>>& batch_elements,
std::vector<Tensor>* out_tensors) {
const size_t num_tuple_components = batch_elements[0].size();
const int64_t num_batch_elements = batch_elements.size();
for (size_t component_index = 0; component_index < num_tuple_components;
++component_index) {
TensorShape batch_component_shape({num_batch_elements});
const PartialTensorShape& padded_shape =
dataset()->padded_shapes_[component_index];
for (int dim = 0; dim < padded_shape.dims(); ++dim) {
if (padded_shape.dim_size(dim) == -1) {
TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus(0));
} else {
TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus(
padded_shape.dim_size(dim)));
}
}
for (int64_t i = 0; i < num_batch_elements; ++i) {
const TensorShape& element_shape =
batch_elements[i][component_index].shape();
if (element_shape.dims() != padded_shape.dims()) {
return errors::InvalidArgument(
"All elements in a batch must have the same rank as the "
"padded shape for component",
component_index, ": expected rank ", padded_shape.dims(),
" but got element with rank ", element_shape.dims());
}
for (int dim = 0; dim < padded_shape.dims(); ++dim) {
if (padded_shape.dim_size(dim) == -1) {
if (batch_elements[i][component_index].shape().dim_size(dim) >
batch_component_shape.dim_size(dim + 1)) {
batch_component_shape.set_dim(
dim + 1,
batch_elements[i][component_index].shape().dim_size(dim));
}
} else {
if (batch_elements[i][component_index].shape().dim_size(dim) >
batch_component_shape.dim_size(dim + 1)) {
return errors::DataLoss(
"Attempted to pad to a smaller size than the input "
"element.");
}
}
}
}
out_tensors->emplace_back(ctx->allocator({}),
output_dtypes()[component_index],
batch_component_shape);
Tensor& batch_component = out_tensors->back();
TF_RETURN_IF_ERROR(batch_util::SetElementZero(
&batch_component, dataset()->padding_values_[component_index]));
TensorShape component_shape({});
for (int i = 1; i < batch_component_shape.dims(); ++i) {
TF_RETURN_IF_ERROR(component_shape.AddDimWithStatus(
batch_component_shape.dim_size(i)));
}
auto copy_element_fn = [component_index, &batch_elements,
&batch_component, &component_shape](int index) {
if (batch_elements[index][component_index].shape() ==
component_shape) {
TF_RETURN_IF_ERROR(batch_util::CopyElementToSlice(
batch_elements[index][component_index], &batch_component,
index));
} else {
TF_RETURN_IF_ERROR(batch_util::CopyElementToLargerSlice(
batch_elements[index][component_index], &batch_component,
index));
}
return absl::OkStatus();
};
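        // Only fan the copies out to the thread pool when parallel copy is
        // requested and each element's slice is reasonably large (>= 32 KiB);
        // small copies are cheaper to perform inline.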
if (dataset()->parallel_copy_ && (batch_component.AllocatedBytes() /
num_batch_elements) >= (1 << 15)) {
BlockingCounter counter(num_batch_elements);
Status status;
mutex status_mu;
const auto num_threads = ctx->runner_threadpool_size();
const auto slice_size = num_batch_elements / num_threads;
int64_t offset = 0;
for (size_t i = 0; i < num_threads; ++i) {
int64_t length = slice_size;
if (i < num_batch_elements % num_threads) ++length;
(*ctx->runner())([offset, length, &status, &status_mu, &counter,
©_element_fn]() {
for (size_t j = offset; j < offset + length; ++j) {
{
Status s = copy_element_fn(j);
mutex_lock l(status_mu);
status.Update(s);
}
counter.DecrementCount();
}
});
offset += length;
}
counter.Wait();
TF_RETURN_IF_ERROR(status);
} else {
for (size_t i = 0; i < num_batch_elements; ++i) {
TF_RETURN_IF_ERROR(copy_element_fn(i));
}
}
}
return absl::OkStatus();
}
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t batch_size_;
const bool drop_remainder_;
const bool parallel_copy_;
const std::vector<PartialTensorShape> padded_shapes_;
const std::vector<Tensor> padding_values_;
const DatasetBase* const input_;
const int op_version_;
std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
};
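// Op version 1 corresponds to `PaddedBatchDataset`; version 2
// (`PaddedBatchDatasetV2`) additionally takes the `drop_remainder` input.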
PaddedBatchDatasetOp::PaddedBatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == "PaddedBatchDataset" ? 1 : 2) {
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void PaddedBatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
bool drop_remainder = false;
if (op_version_ > 1) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
}
OpInputList padded_shape_tensors;
OP_REQUIRES_OK(ctx, ctx->input_list(kPaddedShapes, &padded_shape_tensors));
std::vector<PartialTensorShape> padded_shapes;
padded_shapes.reserve(padded_shape_tensors.size());
OP_REQUIRES(ctx, padded_shape_tensors.size() == input->output_shapes().size(),
errors::InvalidArgument("Number of padded shapes (",
padded_shape_tensors.size(),
") must match the number of components "
"in the input dataset's elements (",
input->output_shapes().size(), ")"));
for (const Tensor& padded_shape_t : padded_shape_tensors) {
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(padded_shape_t.shape()),
errors::InvalidArgument("All padded shapes must be vectors"));
PartialTensorShape padded_shape;
OP_REQUIRES_OK(ctx, PartialTensorShape::MakePartialShape(
padded_shape_t.vec<int64_t>().data(),
padded_shape_t.NumElements(), &padded_shape));
padded_shapes.push_back(std::move(padded_shape));
}
OpInputList padding_values_list;
OP_REQUIRES_OK(ctx, ctx->input_list(kPaddingValues, &padding_values_list));
std::vector<Tensor> padding_values;
OP_REQUIRES(ctx, padding_values_list.size() == input->output_shapes().size(),
errors::InvalidArgument(
"Number of padding values (", padding_values_list.size(),
") must match the number of components in the input "
"dataset's elements (",
input->output_shapes().size(), ")"));
for (int i = 0; i < padding_values_list.size(); ++i) {
const Tensor& padding_value_t = padding_values_list[i];
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(padding_value_t.shape()),
errors::InvalidArgument("All padding values must be scalars"));
OP_REQUIRES(ctx, padding_value_t.dtype() == input->output_dtypes()[i],
errors::InvalidArgument(
"Mismatched type between padding value ", i,
" and input dataset's component ", i, ": ",
DataTypeString(padding_value_t.dtype()), " vs. ",
DataTypeString(input->output_dtypes()[i])));
padding_values.push_back(tensor::DeepCopy(padding_value_t));
}
*output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_,
std::move(padded_shapes), std::move(padding_values),
input, op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("PaddedBatchDataset").Device(DEVICE_CPU),
PaddedBatchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PaddedBatchDatasetV2").Device(DEVICE_CPU),
PaddedBatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/padded_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "padded_batch_dataset";
constexpr int kOpVersion = 2;
class PaddedBatchDatasetOpTest : public DatasetOpsTestBase {};
class PaddedBatchDatasetParams : public DatasetParams {
public:
template <typename T>
PaddedBatchDatasetParams(T input_dataset_params, int64_t batch_size,
std::vector<Tensor> padded_shapes,
std::vector<Tensor> padded_values,
bool drop_remainder, bool parallel_copy,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_padded_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
batch_size_(batch_size),
padded_shapes_(std::move(padded_shapes)),
padded_values_(std::move(padded_values)),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
num_padded_shapes_(num_padded_shapes) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {batch_size_}));
for (auto& padded_shape : padded_shapes_) {
input_tensors.emplace_back(padded_shape);
}
for (auto& padded_value : padded_values_) {
input_tensors.emplace_back(padded_value);
}
input_tensors.emplace_back(
CreateTensor<bool>(TensorShape({}), {drop_remainder_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {PaddedBatchDatasetOp::kInputDataset,
PaddedBatchDatasetOp::kBatchSize};
for (int i = 0; i < num_padded_shapes_; ++i) {
input_names->emplace_back(
strings::StrCat(PaddedBatchDatasetOp::kPaddedShapes, "_", i));
}
for (int j = 0; j < padded_values_.size(); ++j) {
input_names->emplace_back(
strings::StrCat(PaddedBatchDatasetOp::kPaddingValues, "_", j));
}
input_names->push_back(PaddedBatchDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"parallel_copy", parallel_copy_},
{"Toutput_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"N", num_padded_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return PaddedBatchDatasetOp::kDatasetType;
}
private:
int64_t batch_size_;
std::vector<Tensor> padded_shapes_;
std::vector<Tensor> padded_values_;
bool drop_remainder_;
bool parallel_copy_;
int num_padded_shapes_;
};
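// Batches a 7x2 tensor-slice dataset into batches of 2, padding each element
// to shape [3] with the value 1 and dropping the remainder, which yields three
// full [2, 3] batches.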
PaddedBatchDatasetParams PaddedBatchDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(
TensorShape{7, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})},
"tensor_slice");
return PaddedBatchDatasetParams(
tensor_slice_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
true,
true,
{DT_INT64},
{PartialTensorShape({2, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams2() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
true,
true,
{DT_INT64},
{PartialTensorShape({2, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams3() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({2, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams4() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 1}, {{6, 7, 8}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams5() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
false,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams6() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams7() {
return PaddedBatchDatasetParams(
RangeDatasetParams(0, 0, 1),
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithShortPaddingShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{2}, {1, 2})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidBatchSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
-1,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingShapesSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3}),
CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
2,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1}),
CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
2,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesDType() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<tstring>(TensorShape{}, {"a"})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{1}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
std::vector<GetNextTestCase<PaddedBatchDatasetParams>> GetNextTestCases() {
return {{PaddedBatchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})},
{PaddedBatchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}),
CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}},
{PaddedBatchDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams5(),
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams6(),
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams7(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
GetNextTestCases())
TEST_F(PaddedBatchDatasetOpTest, DatasetNodeName) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(PaddedBatchDatasetOpTest, DatasetTypeString) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(PaddedBatchDatasetOp::kDatasetType, params)));
}
std::vector<DatasetOutputDtypesTestCase<PaddedBatchDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{PaddedBatchDatasetParams1(),
{DT_INT64}},
{PaddedBatchDatasetParams2(),
{DT_INT64}},
{PaddedBatchDatasetParams3(),
{DT_INT64}},
{PaddedBatchDatasetParams4(),
{DT_INT64}},
{PaddedBatchDatasetParams5(),
{DT_INT64}},
{PaddedBatchDatasetParams6(),
{DT_INT64}},
{PaddedBatchDatasetParams7(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<PaddedBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{PaddedBatchDatasetParams1(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams2(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams3(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams4(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams5(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams6(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams7(),
{PartialTensorShape({-1, -1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<PaddedBatchDatasetParams>>
CardinalityTestCases() {
return {{PaddedBatchDatasetParams1(),
3},
{PaddedBatchDatasetParams2(),
3},
{PaddedBatchDatasetParams3(),
4},
{PaddedBatchDatasetParams4(),
3},
{PaddedBatchDatasetParams5(),
4},
{PaddedBatchDatasetParams6(),
4},
{PaddedBatchDatasetParams7(),
0}};
}
DATASET_CARDINALITY_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<PaddedBatchDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{PaddedBatchDatasetParams1(),
{DT_INT64}},
{PaddedBatchDatasetParams2(),
{DT_INT64}},
{PaddedBatchDatasetParams3(),
{DT_INT64}},
{PaddedBatchDatasetParams4(),
{DT_INT64}},
{PaddedBatchDatasetParams5(),
{DT_INT64}},
{PaddedBatchDatasetParams6(),
{DT_INT64}},
{PaddedBatchDatasetParams7(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest,
PaddedBatchDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<PaddedBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{PaddedBatchDatasetParams1(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams2(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams3(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams4(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams5(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams6(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams7(),
{PartialTensorShape({-1, -1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest,
PaddedBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(PaddedBatchDatasetOpTest, IteratorPrefix) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(PaddedBatchDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<PaddedBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PaddedBatchDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})},
{PaddedBatchDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams3(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}),
CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}},
{PaddedBatchDatasetParams4(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams5(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams6(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams7(),
{0, 2, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(PaddedBatchDatasetOpTest,
PaddedBatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(PaddedBatchDatasetOpTest, ShortPadding) {
auto dataset_params = PaddedBatchDatasetParamsWithShortPaddingShape();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
tensorflow::error::DATA_LOSS);
}
TEST_F(PaddedBatchDatasetOpTest, InvalidPaddedShapes) {
auto dataset_params = PaddedBatchDatasetParamsWithInvalidPaddingShape();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
class ParameterizedInvalidArgumentTest
: public PaddedBatchDatasetOpTest,
public ::testing::WithParamInterface<PaddedBatchDatasetParams> {};
TEST_P(ParameterizedInvalidArgumentTest, InvalidArguments) {
auto dataset_params = GetParam();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
INSTANTIATE_TEST_SUITE_P(
PaddedBatchDatasetOpTest, ParameterizedInvalidArgumentTest,
::testing::ValuesIn(
{PaddedBatchDatasetParamsWithInvalidBatchSize(),
PaddedBatchDatasetParamsWithInvalidPaddingShapesSize(),
PaddedBatchDatasetParamsWithInvalidPaddingValuesSize(),
PaddedBatchDatasetParamsWithInvalidPaddingValuesDType(),
PaddedBatchDatasetParamsWithInvalidPaddingValuesShape()}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/padded_batch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/padded_batch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7794f54-4a87-4d2c-aa6d-221068b35fe2 | cpp | tensorflow/tensorflow | reduce_dataset_op | tensorflow/core/kernels/data/reduce_dataset_op.cc | tensorflow/core/kernels/data/reduce_dataset_op_test.cc | #include "tensorflow/core/kernels/data/reduce_dataset_op.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
}
ReduceDatasetOp::ReduceDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_reduce_dataset") {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr("use_inter_op_parallelism",
¶ms.use_inter_op_parallelism));
params.use_default_device = false;
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, "f", params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
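// Runs the reduction eagerly: builds an iterator over the input dataset and
// repeatedly applies the reduce function to (state, next_element) until the
// input is exhausted, then emits the final state as the op's outputs.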
Status ReduceDatasetOp::DoCompute(OpKernelContext* ctx) {
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode("ReduceDatasetOp::DoCompute",
{{"id", ctx->step_id()}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("ReduceDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
OpInputList inputs;
TF_RETURN_IF_ERROR(ctx->input_list("initial_state", &inputs));
std::vector<Tensor> state(inputs.begin(), inputs.end());
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, "other_arguments", &captured_func));
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
std::unique_ptr<IteratorBase> iterator;
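  // On CPU the dataset is finalized (e.g. graph optimizations applied) before
  // the iterator is created; other devices use the dataset as-is.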
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset = nullptr;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
core::ScopedUnref unref(finalized_dataset);
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
&iter_ctx, nullptr, "ReduceIterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx, nullptr,
"ReduceIterator", &iterator));
}
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> next_input_element;
bool end_of_input;
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &next_input_element, &end_of_input));
if (end_of_input) {
break;
}
std::vector<Tensor> args;
args.reserve(state.size() + next_input_element.size());
std::copy(state.begin(), state.end(), std::back_inserter(args));
std::copy(next_input_element.begin(), next_input_element.end(),
std::back_inserter(args));
std::vector<Tensor> reduce_func_output;
TF_RETURN_IF_ERROR(instantiated_captured_func->Run(
&iter_ctx, std::move(args), &reduce_func_output, nullptr));
if (reduce_func_output.size() != state.size()) {
return errors::InvalidArgument(
"The number of components of the initial state and the "
"reduce "
"function output does not match. (initial_state=",
state.size(), ", output=", reduce_func_output.size(), ").");
}
std::swap(reduce_func_output, state);
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, state));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, state));
for (size_t i = 0; i < state.size(); ++i) {
ctx->set_output(i, state[i]);
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ReduceDataset").Device(DEVICE_CPU),
ReduceDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ReduceDataset");
}
}
} | #include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "reduce_dataset";
class ReduceDatasetParams : public DatasetParams {
public:
template <typename T>
ReduceDatasetParams(T input_dataset_params, std::vector<Tensor> initial_state,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_state, DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
bool use_inter_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
initial_state_(std::move(initial_state)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_state_(std::move(type_state)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = initial_state_;
input_tensors.insert(input_tensors.end(), other_arguments_.begin(),
other_arguments_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back("input_dataset");
for (int i = 0; i < initial_state_.size(); ++i) {
input_names->emplace_back(strings::StrCat("initial_state_", i));
}
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(strings::StrCat("other_arguments_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
*attr_vector = {{"f", func_},
{"Tstate", type_state_},
{"Targuments", type_arguments_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return "Reduce"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> initial_state_;
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_state_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
};
class ReduceDatasetOpTest : public DatasetOpsTestBase {};
ReduceDatasetParams ReduceDatasetParams1() {
return ReduceDatasetParams(
RangeDatasetParams(0, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams2() {
return ReduceDatasetParams(
RangeDatasetParams(1, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {1}}),
{},
FunctionDefHelper::FunctionRef("XPlusOneXTimesY", {{"T", DT_INT64}}),
{test::function::XPlusOneXTimesY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams3() {
return ReduceDatasetParams(
RangeDatasetParams(0, 0, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {3}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
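// Case 1 folds range(0, 10) into an initial state of 1 with XAddY
// (1 + 0 + ... + 9 = 46); case 2 tracks a counter and a running product over
// range(1, 10); case 3 reduces an empty range, so the initial state {1, 3} is
// returned unchanged.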
std::vector<GetNextTestCase<ReduceDatasetParams>> GetNextTestCases() {
return {{
ReduceDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{46}})},
{ReduceDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}),
{{10}, {1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9}})},
{
ReduceDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{1}, {3}})}};
}
class ParameterizedReduceDatasetOpTest
: public ReduceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ReduceDatasetParams>> {};
TEST_P(ParameterizedReduceDatasetOpTest, Compute) {
auto test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.dataset_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case.dataset_params, &output));
TF_EXPECT_OK(
ExpectEqual(test_case.expected_outputs, output, true));
}
INSTANTIATE_TEST_SUITE_P(ReduceDatasetOpTest, ParameterizedReduceDatasetOpTest,
::testing::ValuesIn(GetNextTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/reduce_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/reduce_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
58e8fdc9-af26-4644-a6db-e71733275634 | cpp | tensorflow/tensorflow | parallel_batch_dataset_op | tensorflow/core/kernels/data/parallel_batch_dataset_op.cc | tensorflow/core/kernels/data/parallel_batch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/parallel_batch_dataset_op.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelBatchDatasetOp::kDatasetType;
constexpr const char* const ParallelBatchDatasetOp::kInputDataset;
constexpr const char* const ParallelBatchDatasetOp::kBatchSize;
constexpr const char* const
ParallelBatchDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelBatchDatasetOp::kDropRemainder;
constexpr const char* const ParallelBatchDatasetOp::kParallelCopy;
constexpr const char* const ParallelBatchDatasetOp::kOutputTypes;
constexpr const char* const ParallelBatchDatasetOp::kOutputShapes;
constexpr const char* const ParallelBatchDatasetOp::kDeterministic;
namespace {
constexpr char kBatchResultsSize[] = "batch_results_size";
constexpr char kTFDataParallelBatch[] = "tf_data_parallel_batch";
constexpr char kBatchResults[] = "batch_results";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumElements[] = "num_elements";
constexpr char kCallFinished[] = "call_finished";
constexpr char kOutputAllocated[] = "output_allocated";
constexpr char kStatus[] = "status";
}
class ParallelBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, int64_t num_parallel_calls,
bool drop_remainder, bool parallel_copy, const DatasetBase* input,
DeterminismPolicy deterministic)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
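      // When the remainder is kept, a very large batch_size may be used to
      // batch an entire dataset at once, so the reserve() size is capped to
      // bound up-front memory.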
reserve_size_(drop_remainder ? batch_size
: std::min<int64_t>(batch_size, 1 << 16)),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
input_(input),
deterministic_(deterministic),
traceme_metadata_(
{{"autotune",
num_parallel_calls == model::kAutotune ? "true" : "false"},
{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
for (const auto& input_shape : input_shapes) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.emplace_back(
PartialTensorShape({batch_size_}).Concatenate(input_shape));
} else {
output_shapes_.emplace_back(
PartialTensorShape({-1}).Concatenate(input_shape));
}
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
Node* num_parallel_calls = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(num_parallel_calls_, &num_parallel_calls));
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue parallel_copy_attr;
b->BuildAttrValue(parallel_copy_, ¶llel_copy_attr);
attrs.emplace_back(kParallelCopy, parallel_copy_attr);
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, batch_size, num_parallel_calls, drop_remainder},
attrs, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()) {}
~Iterator() override {
CancelThreads(true);
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
if (dataset()->parallel_copy_) {
num_parallel_calls_->value = 1;
} else {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(std::move(params));
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx.checkpoint());
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
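    // Blocks until the runner thread has a batch result ready (respecting the
    // determinism setting), then turns that result into the output tensors.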
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<BatchResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(&result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelBatchConsume",
{{"element_id", result->uid}});
});
mutex_lock l(result->mu);
auto cleanup =
gtl::MakeCleanup([result]() TF_EXCLUSIVE_LOCKS_REQUIRED(
&BatchResult::mu) { result->output.clear(); });
if (result->output_allocated) {
RecordBufferDequeue(ctx, result->output);
}
ctx->MergeCheckpoint(&result->checkpoint);
TF_RETURN_IF_ERROR(
ProcessBatch(dataset()->batch_size_, result->num_elements,
dataset()->drop_remainder_, result->status, ctx,
out_tensors, end_of_sequence, &result->output));
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncKnownRatioNode(
std::move(args),
dataset()->batch_size_, 1.0,
{model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (ctx->symbolic_checkpoint()) {
return writer->WriteScalar(prefix(), kBatchResultsSize, 0);
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
DCHECK_EQ(num_calls_, 0);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize,
batch_results_.size()));
for (size_t i = 0; i < batch_results_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteBatchResult(writer, i));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
DCHECK(!runner_thread_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t batch_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBatchResultsSize, &batch_results_size));
DCHECK(batch_results_.empty());
for (int i = 0; i < batch_results_size; ++i) {
TF_RETURN_IF_ERROR(ReadBatchResult(ctx, reader, i));
}
if (ctx->warm_start()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
auto result = dataset()->traceme_metadata_;
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
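    // State shared between the producer (runner thread) and the consumer
    // (GetNext) for one in-flight batch: the gathered output tensors, element
    // count, status, and bookkeeping flags.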
struct BatchResult {
explicit BatchResult(IteratorContext* ctx)
: end_of_input(false),
num_elements(0),
status(absl::OkStatus()),
call_finished(false),
output_allocated(false),
uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
mutex mu;
bool end_of_input TF_GUARDED_BY(mu);
int64_t num_elements TF_GUARDED_BY(mu);
std::vector<Tensor> output TF_GUARDED_BY(mu);
Status status TF_GUARDED_BY(mu);
bool call_finished TF_GUARDED_BY(&Iterator::mu_);
bool output_allocated TF_GUARDED_BY(mu);
const int64_t uid = -1;
MemoryCheckpoint checkpoint;
};
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->call_finished = true;
cond_var_->notify_all();
}
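    // Produces a single batch: pulls up to batch_size elements from the input
    // iterator and then schedules the copy of those elements into the batch
    // tensors on the runner.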
void CallBatching(std::shared_ptr<IteratorContext> ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelBatchProduce",
{{"element_id", result->uid}});
});
if (!input_impl_) {
CallCompleted(ctx, result);
return;
}
std::vector<std::vector<Tensor>> batch_elements;
batch_elements.reserve(dataset()->reserve_size_);
bool end_of_input = false;
for (int i = 0; i < dataset()->batch_size_ && !end_of_input; ++i) {
std::vector<Tensor> batch_element_tuple;
Status status = input_impl_->GetNext(ctx.get(), &batch_element_tuple,
&end_of_input);
{
mutex_lock l(result->mu);
result->end_of_input = result->end_of_input || end_of_input;
result->status.Update(status);
result->checkpoint.Merge(ctx->checkpoint());
if (result->end_of_input || !result->status.ok()) break;
}
if (!end_of_input) {
batch_elements.emplace_back(std::move(batch_element_tuple));
mutex_lock l(result->mu);
result->num_elements++;
} else {
input_impl_.reset();
}
}
if (batch_elements.empty()) {
CallCompleted(ctx, result);
return;
}
auto copy_elements_fn = [this, ctx, result,
batch_elements =
std::move(batch_elements)]() mutable {
Status status;
{
mutex_lock l(result->mu);
status = CopyBatch(AnyContext(ctx.get()), std::move(batch_elements),
dataset()->parallel_copy_, &result->output);
result->status.Update(status);
if (result->status.ok()) {
result->output_allocated = true;
RecordBufferEnqueue(ctx.get(), result->output);
} else {
result->output.clear();
result->output_allocated = false;
}
}
CallCompleted(ctx, result);
return status;
};
(*ctx->runner())(std::move(copy_elements_fn));
}
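// Starts cancellation; when `wait` is true, blocks until all outstanding
// batching calls have completed.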
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto new_ctx = std::make_shared<IteratorContext>(*ctx);
runner_thread_ =
ctx->StartThread(kTFDataParallelBatch,
std::bind(&Iterator::RunnerThread, this, new_ctx));
}
}
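// Runner thread body: keeps batching calls in flight as long as both the
// number of outstanding calls and the number of buffered results stay below
// `num_parallel_calls_`.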
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
std::vector<std::shared_ptr<BatchResult>> new_calls;
RecordStart(ctx.get());
auto stop_cleanup =
gtl::MakeCleanup([this, &ctx]() { RecordStop(ctx.get()); });
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
batch_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
batch_results_.push_back(std::make_shared<BatchResult>(ctx.get()));
new_calls.emplace_back(batch_results_.back());
num_calls_++;
}
}
for (const auto& call : new_calls) {
CallBatching(ctx, call);
}
new_calls.clear();
}
}
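// Decides whether GetNext() must keep waiting. If a batch is ready it is moved
// into `*result` and false is returned; deterministic mode only releases the
// oldest batch, while non-deterministic mode may release any finished batch
// (end-of-input batches are only released from the front of the queue).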
bool ShouldWait(std::shared_ptr<BatchResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
if (!deterministic_) {
bool find_batch = false;
for (auto it = batch_results_.begin(); it != batch_results_.end();
++it) {
if (!(*it)->call_finished) continue;
find_batch = (it == batch_results_.begin());
if (!find_batch) {
tf_shared_lock l((*it)->mu);
find_batch = !(*it)->end_of_input;
}
if (find_batch) {
std::swap(*result, *it);
batch_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else if (!batch_results_.empty() &&
batch_results_.front()->call_finished) {
std::swap(*result, batch_results_.front());
batch_results_.pop_front();
cond_var_->notify_all();
return false;
}
return true;
}
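// Reconstructs one BatchResult (flags, element count, tensors, and status)
// from a checkpoint written by WriteBatchResult().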
Status ReadBatchResult(IteratorContext* ctx, IteratorStateReader* reader,
size_t index) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
batch_results_.push_back(std::make_shared<BatchResult>(ctx));
std::shared_ptr<BatchResult> result = batch_results_.back();
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
result->end_of_input = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
&result->num_elements));
result->call_finished = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kCallFinished));
result->output_allocated = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated));
TF_RETURN_IF_ERROR(ReadBatch(ctx, reader, dataset()->batch_size_,
prefix(), batch_prefix, &result->output));
TF_RETURN_IF_ERROR(ReadStatus(prefix(),
strings::StrCat(batch_prefix, "_", kStatus),
reader, &result->status));
if (result->output_allocated) {
RecordBufferEnqueue(ctx, result->output);
}
return absl::OkStatus();
}
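// Serializes one BatchResult (flags, element count, tensors, and status) so a
// restored iterator can resume with the same in-flight batches.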
Status WriteBatchResult(IteratorStateWriter* writer, size_t index)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
std::shared_ptr<BatchResult> result = batch_results_[index];
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
if (result->end_of_input) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
result->num_elements));
if (result->call_finished) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kCallFinished), ""));
}
if (result->output_allocated) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated),
""));
}
TF_RETURN_IF_ERROR(WriteBatch(dataset()->batch_size_,
result->num_elements, prefix(),
batch_prefix, writer, &result->output));
TF_RETURN_IF_ERROR(
WriteStatus(prefix(), strings::StrCat(batch_prefix, "_", kStatus),
result->status, writer));
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
std::unique_ptr<CancellationManager> cancellation_manager_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<BatchResult>> batch_results_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
const int64_t batch_size_;
const int64_t reserve_size_;
const int64_t num_parallel_calls_;
const bool drop_remainder_;
const bool parallel_copy_;
const DatasetBase* const input_;
std::vector<PartialTensorShape> output_shapes_;
const DeterminismPolicy deterministic_;
const TraceMeMetadata traceme_metadata_;
};
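// Kernel entry points: the static `deterministic` and `parallel_copy`
// attributes are read once at construction, while batch_size,
// num_parallel_calls and drop_remainder come from input tensors in
// MakeDataset().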
ParallelBatchDatasetOp::ParallelBatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kDeterministic)) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
}
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void ParallelBatchDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
int64_t num_parallel_calls = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kNumParallelCalls,
&num_parallel_calls));
bool drop_remainder = false;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
*output = new Dataset(ctx, batch_size, num_parallel_calls, drop_remainder,
parallel_copy_, input, deterministic_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelBatchDataset").Device(DEVICE_CPU),
ParallelBatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/parallel_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_batch_dataset";
constexpr int kOpVersion = 1;
class ParallelBatchDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelBatchDatasetParams(T input_dataset_params, int64_t batch_size,
int64_t num_parallel_calls, bool drop_remainder,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
const bool parallel_copy,
const std::string& deterministic, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
deterministic_(deterministic) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor batch_size = CreateTensor<int64_t>(TensorShape({}), {batch_size_});
Tensor num_parallel_calls =
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_});
Tensor drop_remainder =
CreateTensor<bool>(TensorShape({}), {drop_remainder_});
return {batch_size, num_parallel_calls, drop_remainder};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {ParallelBatchDatasetOp::kInputDataset,
ParallelBatchDatasetOp::kBatchSize,
ParallelBatchDatasetOp::kNumParallelCalls,
ParallelBatchDatasetOp::kDropRemainder};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{"parallel_copy", parallel_copy_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"deterministic", deterministic_},
{"metadata", ""},
};
return absl::OkStatus();
};
string dataset_type() const override {
return ParallelBatchDatasetOp::kDatasetType;
}
private:
int64_t batch_size_;
int64_t num_parallel_calls_;
bool drop_remainder_;
bool parallel_copy_;
std::string deterministic_;
};
class ParallelBatchDatasetOpTest : public DatasetOpsTestBase {};
ParallelBatchDatasetParams ParallelBatchDatasetParams1() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams2() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
true,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams3() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
3,
1,
false,
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams4() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
3,
1,
true,
{DT_INT64},
{PartialTensorShape({3})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams5() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
12,
1,
true,
{DT_INT64},
{PartialTensorShape({12})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams6() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
12,
1,
false,
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams7() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 0, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams8() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
2,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams9() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
4,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams10() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
true,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams InvalidBatchSizeParallelBatchDatasetParams() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
-1,
1,
false,
{DT_INT64},
{PartialTensorShape({3})},
false,
DeterminismPolicy::kNondeterministic,
kNodeName);
}
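// Expected outputs per parameter set; params 5 (batch_size larger than the
// input with drop_remainder=true) and 7 (empty input) yield no elements.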
std::vector<GetNextTestCase<ParallelBatchDatasetParams>> GetNextTestCases() {
return {
{ParallelBatchDatasetParams1(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams2(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{ParallelBatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({3}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{ParallelBatchDatasetParams5(),
{}},
{ParallelBatchDatasetParams6(),
CreateTensors<int64_t>(TensorShape({10}),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}})},
{ParallelBatchDatasetParams7(),
{}},
{ParallelBatchDatasetParams8(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams9(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams10(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelBatchDatasetOpTest, ParallelBatchDatasetParams,
GetNextTestCases())
TEST_F(ParallelBatchDatasetOpTest, DatasetNodeName) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(parallel_batch_dataset_params.node_name()));
}
TEST_F(ParallelBatchDatasetOpTest, DatasetTypeString) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
name_utils::OpNameParams params;
params.op_version = parallel_batch_dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelBatchDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelBatchDatasetOpTest, DatasetOutputDtypes) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<ParallelBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ParallelBatchDatasetParams1(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams2(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams3(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams4(),
{PartialTensorShape({3})}},
{ParallelBatchDatasetParams5(),
{PartialTensorShape({12})}},
{ParallelBatchDatasetParams6(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams7(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams8(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams9(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams10(),
{PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ParallelBatchDatasetParams>>
CardinalityTestCases() {
return {{ParallelBatchDatasetParams1(),
3},
{ParallelBatchDatasetParams2(),
3},
{ParallelBatchDatasetParams3(),
4},
{ParallelBatchDatasetParams4(),
3},
{ParallelBatchDatasetParams5(),
0},
{ParallelBatchDatasetParams6(),
1},
{ParallelBatchDatasetParams7(),
0},
{ParallelBatchDatasetParams8(),
3},
{ParallelBatchDatasetParams9(),
3},
{ParallelBatchDatasetParams10(),
3}};
}
DATASET_CARDINALITY_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams, CardinalityTestCases())
TEST_F(ParallelBatchDatasetOpTest, IteratorOutputDtypes) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<ParallelBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ParallelBatchDatasetParams1(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams2(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams3(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams4(),
{PartialTensorShape({3})}},
{ParallelBatchDatasetParams5(),
{PartialTensorShape({12})}},
{ParallelBatchDatasetParams6(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams7(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams8(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams9(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams10(),
{PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ParallelBatchDatasetOpTest, IteratorOutputPrefix) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = parallel_batch_dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ParallelBatchDatasetOp::kDatasetType,
parallel_batch_dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{ParallelBatchDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams3(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{ParallelBatchDatasetParams4(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8})}},
{ParallelBatchDatasetParams5(),
{0, 1, 5},
{}},
{ParallelBatchDatasetParams6(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({10}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})}},
{ParallelBatchDatasetParams7(),
{0, 1, 5},
{}},
{ParallelBatchDatasetParams8(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams9(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams10(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelBatchDatasetOpTest, InvalidParallelBatchSize) {
auto parallel_batch_dataset_params =
InvalidBatchSizeParallelBatchDatasetParams();
EXPECT_EQ(Initialize(parallel_batch_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_batch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_batch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df71feca-e632-41c2-9e0a-72b8077361c8 | cpp | tensorflow/tensorflow | map_defun_op | tensorflow/core/kernels/data/map_defun_op.cc | tensorflow/core/kernels/data/map_defun_op_test.cc | #include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/batch_util.h"
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
namespace data {
constexpr const char* const MapDefunOp::kArguments;
constexpr const char* const MapDefunOp::kCapturedInputs;
constexpr const char* const MapDefunOp::kTarguments;
constexpr const char* const MapDefunOp::kTcaptured;
constexpr const char* const MapDefunOp::kOutputTypes;
constexpr const char* const MapDefunOp::kOutputShapes;
constexpr const char* const MapDefunOp::kFunc;
constexpr const char* const MapDefunOp::kMaxIntraOpParallelism;
constexpr char kOutput[] = "output";
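// Per-Compute state shared by all function invocations: the op inputs, the
// per-element argument shapes, and the output list guarded by `mu`.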
struct MapDefunOp::ComputeOptions {
OpInputList args;
const std::vector<TensorShape> arg_shapes;
OpInputList captured_inputs;
const int64_t batch_size;
std::function<void(std::function<void()>)> runner;
std::vector<PartialTensorShape> output_shapes TF_GUARDED_BY(mu);
OpOutputList output TF_GUARDED_BY(mu);
mutex mu;
ComputeOptions(OpKernelContext* ctx, OpInputList args,
OpInputList captured_inputs,
std::vector<TensorShape> arg_shapes, int64_t batch_size,
const std::vector<PartialTensorShape>& output_shapes_attr,
int max_parallelism)
: args(args),
arg_shapes(std::move(arg_shapes)),
captured_inputs(captured_inputs),
batch_size(batch_size),
output_shapes(output_shapes_attr) {
if (max_parallelism >= 1) {
runner = RunnerWithMaxParallelism(*ctx->runner(), max_parallelism);
}
}
};
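// Call frame that exposes slice `iter_` of each batched argument (plus the
// captured inputs) to the mapped function and copies its return values into
// the corresponding output slices.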
class MapDefunOp::MapFunctionCallFrame : public CallFrameInterface {
public:
MapFunctionCallFrame(ComputeOptions* compute_opts, OpKernel* kernel,
size_t iter)
: compute_opts_(compute_opts),
kernel_(kernel),
iter_(iter),
sliced_args_(compute_opts_->args.size()) {}
~MapFunctionCallFrame() override = default;
size_t num_args() const override {
return compute_opts_->args.size() + compute_opts_->captured_inputs.size();
}
size_t num_retvals() const override {
return static_cast<size_t>(kernel_->num_outputs());
}
Status GetArg(int index, const Tensor** val) override {
if (index < 0 || index >= compute_opts_->args.size() +
compute_opts_->captured_inputs.size()) {
return errors::InvalidArgument("Mismatch in number of function inputs.");
}
if (index >= compute_opts_->args.size()) {
*val =
&compute_opts_->captured_inputs[index - compute_opts_->args.size()];
return absl::OkStatus();
}
mutex_lock l(mu_);
bool result = sliced_args_[index].CopyFrom(
compute_opts_->args[index].Slice(iter_, iter_ + 1),
compute_opts_->arg_shapes.at(index));
if (!result) {
return errors::Internal("GetArg failed.");
} else if (!sliced_args_[index].IsAligned()) {
sliced_args_[index] = tensor::DeepCopy(sliced_args_[index]);
}
*val = &sliced_args_[index];
return absl::OkStatus();
}
Status SetRetval(int index, const Tensor& val) override {
if (index < 0 || index >= kernel_->num_outputs()) {
return errors::InvalidArgument("Mismatch in number of function outputs.");
}
if (val.dtype() != kernel_->output_type(index)) {
return errors::InvalidArgument(
"Mismatch in function return type and expected output type for "
"output: ",
index);
}
Tensor* out;
{
mutex_lock l(compute_opts_->mu);
if (!compute_opts_->output_shapes.at(index).IsCompatibleWith(
val.shape())) {
return errors::InvalidArgument(
"Mismatch in function retval shape, ", val.shape(),
", and expected output shape, ",
compute_opts_->output_shapes.at(index).DebugString(), ".");
}
if (!compute_opts_->output_shapes.at(index).IsFullyDefined()) {
compute_opts_->output_shapes.at(index) = val.shape();
TensorShape actual_shape = val.shape();
actual_shape.InsertDim(0, compute_opts_->batch_size);
TF_RETURN_IF_ERROR(
compute_opts_->output.allocate(index, actual_shape, &out));
} else {
out = (compute_opts_->output)[index];
}
}
return batch_util::CopyElementToSlice(val, out, iter_);
}
private:
ComputeOptions* const compute_opts_;
const OpKernel* kernel_;
const size_t iter_;
mutex mu_;
std::vector<Tensor> sliced_args_ TF_GUARDED_BY(mu_);
};
MapDefunOp::MapDefunOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {
auto func_lib = ctx->function_library();
OP_REQUIRES(ctx, func_lib != nullptr,
errors::Internal("No function library."));
const NameAttrList* func;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kFunc, &func));
OP_REQUIRES_OK(ctx,
func_lib->Instantiate(func->name(), AttrSlice(&func->attr()),
&func_handle_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kMaxIntraOpParallelism, &max_intra_op_parallelism_));
OP_REQUIRES(ctx, ctx->num_inputs() >= 1,
errors::InvalidArgument("Must have at least one input."));
OP_REQUIRES(ctx, ctx->num_outputs() >= 1,
errors::InvalidArgument("Must have at least one output."));
OP_REQUIRES(ctx, ctx->num_outputs() == output_shapes_.size(),
errors::InvalidArgument(
"Length of output_shapes and output_types must match."));
}
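// Launches one function invocation per element of dimension 0; the
// ReffedStatusCallback aggregates their statuses and invokes `done` once the
// last invocation finishes.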
void MapDefunOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
ComputeOptions* compute_opts = nullptr;
OP_REQUIRES_OK_ASYNC(ctx, SetupArgs(ctx, &compute_opts), done);
Status s = SetupOutputs(ctx, compute_opts);
if (!s.ok()) delete compute_opts;
OP_REQUIRES_OK_ASYNC(ctx, s, done);
FunctionLibraryRuntime::Options opts;
SetRunOptions(ctx, &opts, compute_opts, false);
StatusCallback callback = std::bind(
[](OpKernelContext* ctx, ComputeOptions* compute_opts, DoneCallback& done,
const Status& status) {
delete compute_opts;
ctx->SetStatus(status);
done();
},
ctx, compute_opts, std::move(done), std::placeholders::_1);
auto* refcounted = new ReffedStatusCallback(std::move(callback));
CancellationManager* parent_mgr = ctx->cancellation_manager();
for (size_t i = 0; i < static_cast<size_t>(compute_opts->batch_size); ++i) {
CancellationManager* c_mgr = new CancellationManager(parent_mgr);
opts.cancellation_manager = c_mgr;
auto* call_frame = new MapFunctionCallFrame(compute_opts, this, i);
refcounted->Ref();
ctx->function_library()->Run(
opts, func_handle_, call_frame,
[call_frame, refcounted, c_mgr](const Status& func_status) {
delete c_mgr;
delete call_frame;
refcounted->UpdateStatus(func_status);
refcounted->Unref();
});
}
refcounted->Unref();
}
void MapDefunOp::SetRunOptions(OpKernelContext* ctx,
FunctionLibraryRuntime::Options* opts,
ComputeOptions* compute_opts,
bool always_collect_stats) {
opts->rendezvous = ctx->rendezvous();
if (always_collect_stats) {
opts->stats_collector = ctx->stats_collector();
}
if (max_intra_op_parallelism_ >= 1) {
opts->runner = &compute_opts->runner;
} else {
opts->runner = ctx->runner();
}
opts->run_all_kernels_inline = ctx->run_all_kernels_inline();
}
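// Validates that every argument has rank >= 1 and the same leading dimension,
// and records the per-element argument shapes in ComputeOptions.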
Status MapDefunOp::SetupArgs(OpKernelContext* ctx,
ComputeOptions** compute_opts) {
OpInputList arguments;
TF_RETURN_IF_ERROR(ctx->input_list(kArguments, &arguments));
OpInputList captured_inputs;
TF_RETURN_IF_ERROR(ctx->input_list(kCapturedInputs, &captured_inputs));
int64_t batch_size = arguments[0].dims() > 0 ? arguments[0].dim_size(0) : -1;
for (size_t i = 0; i < arguments.size(); ++i) {
if (arguments[i].dims() == 0) {
return errors::InvalidArgument(
"All inputs must have rank at least 1. Input ", i,
" has a rank of 0.");
} else if (arguments[i].dim_size(0) != batch_size) {
return errors::InvalidArgument(
"All inputs must have the same dimension 0. Input ", i,
" has leading dimension ", ctx->input(i).dim_size(0),
", while all previous inputs have leading dimension ", batch_size);
}
}
std::vector<TensorShape> arg_shapes;
arg_shapes.reserve(arguments.size());
for (size_t i = 0; i < arguments.size(); ++i) {
arg_shapes.push_back(arguments[i].shape());
arg_shapes.at(i).RemoveDim(0);
}
*compute_opts =
new ComputeOptions(ctx, arguments, captured_inputs, std::move(arg_shapes),
batch_size, output_shapes_, max_intra_op_parallelism_);
return absl::OkStatus();
}
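// Allocates outputs whose shapes are fully defined up front; outputs with
// unknown shapes are allocated by the first SetRetval() call for that index.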
Status MapDefunOp::SetupOutputs(OpKernelContext* ctx, ComputeOptions* opts) {
mutex_lock l(opts->mu);
TF_RETURN_IF_ERROR(ctx->output_list(kOutput, &opts->output));
for (size_t i = 0; i < output_types().size(); ++i) {
if (output_shapes_.at(i).IsFullyDefined()) {
Tensor* out = nullptr;
TensorShape output_shape;
output_shapes_.at(i).AsTensorShape(&output_shape);
output_shape.InsertDim(0, opts->batch_size);
TF_RETURN_IF_ERROR(opts->output.allocate(i, output_shape, &out));
}
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapDefun").Device(DEVICE_CPU), MapDefunOp);
}
}
} | #include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "map_defun";
constexpr char kOpName[] = "MapDefun";
class MapDefunOpParams : public DatasetParams {
public:
MapDefunOpParams(std::vector<Tensor> arguments,
std::vector<Tensor> captured_inputs,
DataTypeVector type_arguments, DataTypeVector type_captured,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
int max_intra_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
arguments_(std::move(arguments)),
captured_inputs_(std::move(captured_inputs)),
type_arguments_(std::move(type_arguments)),
type_captured_(std::move(type_captured)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
max_intra_op_parallelism_(max_intra_op_parallelism) {}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = arguments_;
input_tensors.insert(input_tensors.end(), captured_inputs_.begin(),
captured_inputs_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(arguments_.size() + captured_inputs_.size());
for (int i = 0; i < arguments_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kArguments, "_", i));
}
for (int i = 0; i < captured_inputs_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kCapturedInputs, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{MapDefunOp::kTarguments, type_arguments_},
{MapDefunOp::kTcaptured, type_captured_},
{MapDefunOp::kOutputShapes, output_shapes_},
{MapDefunOp::kOutputTypes, output_dtypes_},
{MapDefunOp::kFunc, func_},
{MapDefunOp::kMaxIntraOpParallelism, max_intra_op_parallelism_}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return "MapDef"; }
private:
std::vector<Tensor> arguments_;
std::vector<Tensor> captured_inputs_;
DataTypeVector type_arguments_;
DataTypeVector type_captured_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
int max_intra_op_parallelism_;
};
class MapDefunOpTest : public DatasetOpsTestBase {
protected:
Status CreateMapDefunOpKernel(const MapDefunOpParams& params,
std::unique_ptr<OpKernel>* map_defun_kernel) {
std::vector<string> input_names;
TF_RETURN_IF_ERROR(params.GetInputNames(&input_names));
AttributeVector attributes;
TF_RETURN_IF_ERROR(params.GetAttributes(&attributes));
NodeDef node_def =
test::function::NDef(kNodeName, kOpName, input_names, attributes);
TF_RETURN_IF_ERROR(CreateOpKernel(node_def, map_defun_kernel));
return absl::OkStatus();
}
Status CreateMapDefunContext(
OpKernel* const op_kernel,
absl::InlinedVector<TensorValue, 4UL>* const inputs,
std::unique_ptr<OpKernelContext>* context) {
TF_RETURN_IF_ERROR(CheckOpKernelInput(*op_kernel, *inputs));
TF_RETURN_IF_ERROR(CreateOpKernelContext(op_kernel, inputs, context));
return absl::OkStatus();
}
};
struct TestCase {
MapDefunOpParams map_defun_op_params;
std::vector<Tensor> expected_outputs;
};
TestCase TestCase1() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}})},
{test::function::XTimesTwo()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 2, 4, 6, 8, 10})}};
}
TestCase TestCase2() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 10, 20, 30, 40, 50})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 11, 22, 33, 44, 55})}};
}
TestCase TestCase3() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}),
{10, 101, 12, 103, 14, 105})}};
}
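// The following parameter sets are intentionally malformed (wrong output
// dtype, incompatible output shape, mismatched leading dimensions) and are
// expected to fail with InvalidArgument at run time.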
TestCase InvalidOutputTypes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_FLOAT},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidOutputShapes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2, 2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidInputs() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({2, 2}),
{0, 1, 2, 3})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
class ParameterizedMapDefunOpTest
: public MapDefunOpTest,
public ::testing::WithParamInterface<TestCase> {};
TEST_P(ParameterizedMapDefunOpTest, NormalTests) {
TestCase test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
absl::InlinedVector<TensorValue, 4UL> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(
CreateMapDefunOpKernel(test_case.map_defun_op_params, &map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
TF_ASSERT_OK(RunOpKernel(map_defun_kernel.get(), context.get()));
EXPECT_EQ(context->num_outputs(), test_case.expected_outputs.size());
for (int i = 0; i < context->num_outputs(); ++i) {
TF_EXPECT_OK(ExpectEqual(*context->mutable_output(i),
test_case.expected_outputs[i]));
}
}
INSTANTIATE_TEST_SUITE_P(MapDefunOpTest, ParameterizedMapDefunOpTest,
::testing::ValuesIn(std::vector<TestCase>(
{TestCase1(), TestCase2(), TestCase3()})));
TEST_F(MapDefunOpTest, InvalidArguments) {
std::vector<TestCase> test_cases = {InvalidOutputTypes(),
InvalidOutputShapes(), InvalidInputs()};
for (auto& test_case : test_cases) {
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
absl::InlinedVector<TensorValue, 4UL> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(CreateMapDefunOpKernel(test_case.map_defun_op_params,
&map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
EXPECT_EQ(RunOpKernel(map_defun_kernel.get(), context.get()).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_defun_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_defun_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3fa6f16-f4b2-4a17-85df-c9db628b7db2 | cpp | tensorflow/tensorflow | tensor_slice_dataset_op | tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc | #include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorSliceDatasetOp::kDatasetType;
constexpr const char* const TensorSliceDatasetOp::kComponents;
constexpr const char* const TensorSliceDatasetOp::kToutputTypes;
constexpr const char* const TensorSliceDatasetOp::kOutputShapes;
constexpr const char* const TensorSliceDatasetOp::kIsFiles;
constexpr const char* const
TensorSliceDatasetOp::kReplicateOnSplit;
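// Dataset that slices each component tensor along dimension 0 and emits one
// tuple of slices per index.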
class TensorSliceDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
bool is_files, bool replicate_on_split)
: DatasetBase(DatasetContext(ctx)),
tensors_(std::move(tensors)),
is_files_(is_files),
replicate_on_split_(replicate_on_split) {
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
absl::InlinedVector<int64_t, 4UL> element_dim_sizes;
for (int i = 1; i < t.dims(); ++i) {
element_dim_sizes.push_back(t.dim_size(i));
}
partial_shapes_.emplace_back(element_dim_sizes);
shapes_.emplace_back(std::move(element_dim_sizes));
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
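// Splits are indices into the leading dimension, so a single
// IndexSplitProvider over dim_size(0) is sufficient.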
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<IndexSplitProvider>(tensors_[0].dim_size(0)));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return partial_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return tensors_[0].dim_size(0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
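// Random-access lookup used by global shuffling: returns the `index`-th slice
// of every component.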
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->clear();
out_tensors->reserve(tensors_.size());
for (int i = 0; i < tensors_.size(); ++i) {
out_tensors->push_back(MaybeCopySubSlice(tensors_[i], index));
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
if (is_files_) {
Node* file_node;
TF_RETURN_IF_ERROR(
b->AddIdentity(ctx, "FileIdentity", &node, &file_node));
}
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
AttrValue is_files;
b->BuildAttrValue(is_files_, &is_files);
AttrValue replicate_on_split;
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutputTypes, dtypes},
{kIsFiles, is_files},
{kReplicateOnSplit, replicate_on_split}},
output));
return absl::OkStatus();
}
private:
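// Iterator that draws indices from the split provider (or from the
// global-shuffle index mapper when one is installed) and slices each component
// tensor at that index.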
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {
split_provider_ = std::make_shared<IndexSplitProvider>(
dataset()->tensors_[0].dim_size(0));
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
int64_t index = split.scalar<int64_t>()();
out_tensors->reserve(dataset()->tensors_.size());
for (size_t i = 0; i < dataset()->tensors_.size(); ++i) {
out_tensors->push_back(
MaybeCopySubSlice(dataset()->tensors_[i], index));
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(split_provider_->Save(
[this](const std::string& key) { return full_name(key); }, writer));
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
return split_provider_->Restore(
[this](const std::string& key) { return full_name(key); }, reader);
}
private:
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<TensorShape> shapes_;
std::vector<PartialTensorShape> partial_shapes_;
const bool is_files_;
const bool replicate_on_split_;
};
TensorSliceDatasetOp::TensorSliceDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
if (ctx->HasAttr(kIsFiles)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kIsFiles, &is_files_));
}
if (ctx->HasAttr(kReplicateOnSplit)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));
}
}
void TensorSliceDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components;
components.reserve(inputs.size());
OP_REQUIRES(
ctx, inputs[0].dims() > 0,
errors::InvalidArgument("All components must be at least 1-dimensional"));
const int64_t num_slices = inputs[0].dim_size(0);
for (const Tensor& t : inputs) {
components.push_back(t);
OP_REQUIRES(ctx, t.dims() > 0,
errors::InvalidArgument(
"All components must be at least 1-dimensional"));
OP_REQUIRES(
ctx, t.dim_size(0) == num_slices,
errors::InvalidArgument(
"All components must have the same size in the 0th dimension"));
}
*output =
new Dataset(ctx, std::move(components), is_files_, replicate_on_split_);
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorSliceDataset").Device(DEVICE_CPU),
TensorSliceDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_slice_dataset";
class TensorSliceDatasetOpTest : public DatasetOpsTestBase {};
TensorSliceDatasetParams PlainTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<int64_t>(TensorShape({2, 2}), {1, 2, 3, 4}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint32>(TensorShape({2, 2}), {2, 3, 4, 5}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<uint64>(TensorShape({2, 2}), {3, 4, 5, 6}),
CreateTensor<double>(TensorShape({2, 1}), {37.0, 38.0}),
CreateTensor<tstring>(TensorShape({2, 1}), {"a", "b"})};
return {std::move(components), kNodeName};
}
TensorSliceDatasetParams NestedTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0}),
CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"}),
CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6})};
return {std::move(components), kNodeName};
}
std::vector<GetNextTestCase<TensorSliceDatasetParams>> GetNextTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::vector<string> input_names;
TF_ASSERT_OK(test_case.dataset_params.GetInputNames(&input_names));
size_t num_tensors_per_slice = input_names.size();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_slice = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_slice * cur_slice,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]));
}
}
cur_slice++;
}
}
INSTANTIATE_TEST_SUITE_P(TensorSliceDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorSliceDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorSliceDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorSliceDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<TensorSliceDatasetParams>>
DatasetOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<TensorSliceDatasetParams>>
DatasetOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TensorSliceDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainTensorSliceDatasetParams(), 2},
{NestedTensorSliceDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<TensorSliceDatasetParams>>
IteratorOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<TensorSliceDatasetParams>>
IteratorOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(TensorSliceDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TensorSliceDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params =
static_cast<TensorSliceDatasetParams&>(test_case.dataset_params);
int64_t num_slices = params.num_slices();
size_t num_tensors_per_slice = params.num_tensors_per_slice();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_slices) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i +
num_tensors_per_slice * (cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
TensorSliceDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
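// Sharded iteration with 3 shards at shard index 1 keeps input indices 1 and
// 4, i.e. values 2 and 7.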
TEST_F(TensorSliceDatasetOpTest, SplitProvider) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({7}), {{6, 2, 3, 8, 7, 0, 10}}),
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
TEST_F(TensorSliceDatasetOpTest, SplitProviderEmpty) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({0}), {{}}), kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
040cc8ee-f53e-4ba5-b169-1de71bba8616 | cpp | tensorflow/tensorflow | parallel_interleave_dataset_op | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include <atomic>
#include <deque>
#include <functional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const
ParallelInterleaveDatasetOp::kDatasetType;
constexpr const char* const
ParallelInterleaveDatasetOp::kInputDataset;
constexpr const char* const
ParallelInterleaveDatasetOp::kOtherArguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kCycleLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kBlockLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kDeterministic;
constexpr const char* const ParallelInterleaveDatasetOp::kSloppy;
constexpr const char* const
ParallelInterleaveDatasetOp::kBufferOutputElements;
constexpr const char* const
ParallelInterleaveDatasetOp::kPrefetchInputElements;
constexpr const char* const ParallelInterleaveDatasetOp::kFunc;
constexpr const char* const
ParallelInterleaveDatasetOp::kTarguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputTypes;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputShapes;
constexpr char kInputExhausted[] = "input_exhausted";
constexpr char kNextIndex[] = "next_index";
constexpr char kBlockCount[] = "block_count";
constexpr char kWorkersSize[] = "workers_size";
constexpr char kInterleaveSize[] = "interleave_size";
constexpr char kInterleaveIndices[] = "interleave_indices";
constexpr char kStagingSize[] = "staging_size";
constexpr char kStagingIndices[] = "staging_indices";
constexpr char kWorkerThreadsRunning[] = "worker_threads_running";
constexpr char kDataParallelInterleaveWorker[] =
"data_parallel_interleave_worker";
constexpr char kWorker[] = "worker";
constexpr char kInputSize[] = "input_size";
constexpr char kInput[] = "input";
constexpr char kOutputsSize[] = "outputs_size";
constexpr char kOutputs[] = "outputs";
constexpr char kIsProducing[] = "is_producing";
constexpr char kWorkerThread[] = "worker_thread";
constexpr char kIteratorExhausted[] = "iterator_exhausted";
constexpr char kIteratorCreationStatus[] = "iterator_creation_status";
constexpr char kOutput[] = "output";
constexpr char kEndOfSequence[] = "end_of_sequence";
constexpr char kStatus[] = "status";
constexpr char kOutputSize[] = "output_size";
constexpr char kCode[] = "code";
constexpr char KMessage[] = "msg";
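// Dataset for the legacy/experimental parallel interleave transformation.
// Each input element is mapped through the captured function to a nested
// dataset, and elements from `cycle_length` such datasets are interleaved,
// `block_length` at a time. A pool of `cycle_length + prefetch_input_elements`
// worker threads prefetches elements into per-worker buffers holding at most
// `buffer_output_elements` elements each.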
class ParallelInterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
int64_t block_length, DeterminismPolicy deterministic,
int64_t buffer_output_elements, int64_t prefetch_input_elements,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes, int op_version)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
output_types_(output_types),
output_shapes_(output_shapes),
traceme_metadata_(
{{"block_length",
strings::Printf("%lld", static_cast<long long>(block_length))},
{"cycle_length",
strings::Printf("%lld", static_cast<long long>(cycle_length))},
{"deterministic",
deterministic.IsDeterministic() || deterministic.IsDefault()
? "true"
: "false"}}),
op_version_(op_version) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
bool deterministic =
deterministic_.IsDeterministic() || deterministic_.IsDefault();
return std::make_unique<Iterator>(
Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
deterministic);
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<std::pair<size_t, Node*>> inputs;
std::vector<std::pair<size_t, absl::Span<Node* const>>> list_inputs;
int input_index = 0;
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
inputs.emplace_back(input_index++, input_node);
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
list_inputs.emplace_back(input_index++, other_arguments);
Node* cycle_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
inputs.emplace_back(input_index++, cycle_length_node);
Node* block_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
inputs.emplace_back(input_index++, block_length_node);
if (op_version_ == 1) {
Node* sloppy_node;
TF_RETURN_IF_ERROR(
b->AddScalar(deterministic_.IsNondeterministic(), &sloppy_node));
inputs.emplace_back(input_index++, sloppy_node);
}
Node* buffer_output_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(buffer_output_elements_, &buffer_output_elements_node));
inputs.emplace_back(input_index++, buffer_output_elements_node);
Node* prefetch_input_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(prefetch_input_elements_, &prefetch_input_elements_node));
inputs.emplace_back(input_index++, prefetch_input_elements_node);
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
attrs.emplace_back(kFunc, f);
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(this, inputs, list_inputs, attrs, output));
return absl::OkStatus();
}
private:
int64_t num_threads() const {
return cycle_length_ + prefetch_input_elements_;
}
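  // Iterator over the interleaved elements. `workers_` holds one WorkerState
  // per thread; `interleave_indices_` maps the `cycle_length` active cycle
  // slots to worker indices, and `staging_indices_` queues prefetching
  // workers waiting to be promoted into the cycle. `deterministic` selects
  // between consuming results strictly in cycle order and taking them from
  // whichever worker is ready first.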
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params, bool deterministic)
: DatasetIterator<Dataset>(params),
deterministic_(deterministic),
workers_(dataset()->num_threads()),
worker_thread_states_(dataset()->num_threads()) {}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
cancellation_manager_ = std::make_unique<CancellationManager>();
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
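    // Scans the cycle slots starting at `next_index_`. If the slot's worker
    // has a buffered element, it is returned and the block/cycle position is
    // advanced. In deterministic mode the iterator otherwise blocks on that
    // worker; in non-deterministic mode it may take the first element
    // available from any slot. A worker that has finished its element is
    // handed the next input and staged, and the slot is refilled from the
    // staging queue.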
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(EnsureWorkerThreadsStarted(ctx));
while (!cancelled_) {
bool can_produce_elements = false;
bool must_wait_for_input = true;
for (int64_t i = 0; i < interleave_indices_.size(); ++i) {
int64_t index = (next_index_ + i) % interleave_indices_.size();
int64_t current_worker_index = interleave_indices_[index];
if (current_worker_index < 0) {
continue;
}
WorkerState* current_worker = &workers_[current_worker_index];
can_produce_elements |= current_worker->MayHaveElements();
if (!current_worker->outputs.empty()) {
next_index_ = index;
const bool element_acquired_sloppily = !deterministic_ && i > 1;
if (!element_acquired_sloppily) {
block_count_++;
if (block_count_ == dataset()->block_length_) {
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
}
} else {
block_count_ = 0;
}
*end_of_sequence = false;
Status s = current_worker->outputs.front().status;
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode(
"ParallelInterleaveConsume",
{{"element_id", current_worker->outputs.front().id}});
});
current_worker->outputs.front().output.swap(*out_tensors);
current_worker->outputs.pop_front();
current_worker->cond_var.notify_one();
return s;
} else if (current_worker->is_producing && deterministic_) {
if (next_index_ != index) {
next_index_ = index;
block_count_ = 0;
}
break;
} else if (!current_worker->is_producing) {
interleave_indices_[index] = -1;
if (input_impl_) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
} else {
current_worker->SetInputs(s, std::move(args));
staging_indices_.emplace_back(current_worker_index);
}
}
if (!staging_indices_.empty()) {
interleave_indices_[index] = staging_indices_.front();
staging_indices_.pop_front();
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
can_produce_elements = true;
must_wait_for_input = false;
break;
}
}
}
if (!can_produce_elements && !input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (must_wait_for_input) {
RecordStop(ctx);
if (deterministic_) {
workers_[interleave_indices_[next_index_]].cond_var.wait(l);
} else {
any_element_available_cond_var_.wait(l);
}
RecordStart(ctx);
}
}
return errors::Cancelled(
"ParallelInterleaveDatasetOp::Dataset::Iterator::GetNext");
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncInterleaveManyNode(
std::move(args), {model::MakeNonTunableParameter(
kCycleLength, dataset()->cycle_length_),
model::MakeNonTunableParameter(
kDeterministic, deterministic_ ? 1.0 : 0.0)});
}
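    // Checkpointing writes the input iterator (or an "exhausted" marker), the
    // cycle/block position, each worker's buffered outputs and pending input
    // arguments, each worker thread's in-flight iterator state, and the
    // current interleave/staging slot assignments. Both `mu_` and `ckpt_mu_`
    // are held so the snapshot is consistent with the worker threads.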
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputExhausted, ""));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBlockCount, block_count_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkersSize, workers_.size()));
for (int i = 0; i < workers_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerStateLocked(writer, i));
}
for (int i = 0; i < worker_thread_states_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerThreadStateLocked(ctx, writer, i));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInterleaveSize,
interleave_indices_.size()));
for (int i = 0; i < interleave_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i),
interleave_indices_[i]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kStagingSize, staging_indices_.size()));
for (int i = 0; i < staging_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i),
staging_indices_[i]));
}
if (!worker_threads_.empty()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkerThreadsRunning, ""));
}
return absl::OkStatus();
}
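    // Restores the state written by SaveInternal. The per-thread iterator
    // states are read in parallel on a temporary thread pool since rebuilding
    // each nested iterator can be expensive; worker threads are restarted
    // only if they were running when the checkpoint was taken.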
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
{
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!reader->Contains(prefix(), kInputExhausted)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextIndex, &temp));
next_index_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBlockCount, &temp));
block_count_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kWorkersSize, &temp));
if (temp != dataset()->num_threads()) {
return errors::Internal("Expected ", dataset()->num_threads(),
" worker states but found ", temp, ".");
}
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
TF_RETURN_IF_ERROR(ReadWorkerStateLocked(ctx, reader, i));
}
}
std::unique_ptr<thread::ThreadPool> threadpool = ctx->CreateThreadPool(
"read_worker_thread_state", dataset()->num_threads());
Status s = absl::OkStatus();
BlockingCounter counter(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
threadpool->Schedule([this, i, ctx, reader, &s, &counter] {
WorkerThreadState state;
Status result = ReadWorkerThreadStateLocked(ctx, reader, i, &state);
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!result.ok()) {
s.Update(result);
counter.DecrementCount();
return;
}
worker_thread_states_[i] = std::move(state);
counter.DecrementCount();
});
}
counter.Wait();
if (!s.ok()) {
return s;
}
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
std::set<int64_t> all_indices;
{
int64_t interleave_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInterleaveSize, &interleave_size));
interleave_indices_.reserve(interleave_size);
for (int64_t i = 0; i < interleave_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i), &temp));
if (temp >= 0 && all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
interleave_indices_.emplace_back(temp);
}
}
{
int64_t staging_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kStagingSize, &staging_size));
for (int i = 0; i < staging_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i), &temp));
if (all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
staging_indices_.emplace_back(temp);
}
}
if (reader->Contains(prefix(), kWorkerThreadsRunning)) {
worker_threads_.reserve(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.emplace_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
struct OutputElem {
Status status;
std::vector<Tensor> output;
int64_t id = -1;
explicit OutputElem(const Status& s) : status(s) {}
OutputElem(const Status& s, int64_t id) : status(s), id(id) {}
};
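    // State for a single worker, shared between the worker thread and
    // GetNextInternal under `mu_`: the input arguments handed to the worker,
    // its buffered outputs, and a flag plus condition variable used to signal
    // when it is actively producing.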
struct WorkerState {
std::vector<Tensor> input;
std::deque<OutputElem> outputs;
bool is_producing = false;
condition_variable cond_var;
inline bool MayHaveElements() const {
return is_producing || !outputs.empty();
}
void SetInputs(const Status& s, std::vector<Tensor> input_arguments) {
if (s.ok()) {
DCHECK(!MayHaveElements())
<< "Tried to start inputs, despite already producing!";
input = std::move(input_arguments);
is_producing = true;
cond_var.notify_one();
} else {
outputs.emplace_back(s);
}
}
};
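    // Per-thread state guarded by `ckpt_mu_`, mirroring the worker's
    // in-flight nested iterator so it can be checkpointed mid-element.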
struct WorkerThreadState {
OutputElem output_elem;
bool end_of_sequence = false;
Status iterator_creation_status;
std::vector<Tensor> input;
std::unique_ptr<IteratorBase> iterator;
WorkerThreadState() : output_elem(absl::OkStatus()) {}
};
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(mu_);
cancelled_ = true;
for (auto& worker : workers_) {
worker.cond_var.notify_all();
}
}
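    // Lazily starts the worker threads on the first GetNext call, seeding the
    // first `cycle_length` workers as active cycle slots and the remaining
    // `prefetch_input_elements` workers as staged prefetchers.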
Status EnsureWorkerThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (worker_threads_.empty() && input_impl_) {
worker_threads_.reserve(dataset()->num_threads());
for (int64_t i = 0; i < dataset()->num_threads(); ++i) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
return absl::OkStatus();
}
if (i < dataset()->cycle_length_) {
interleave_indices_.push_back(i);
} else {
staging_indices_.push_back(i);
}
workers_[i].SetInputs(s, std::move(args));
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.push_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
DCHECK(interleave_indices_.size() == dataset()->cycle_length_);
DCHECK(staging_indices_.size() == dataset()->prefetch_input_elements_);
}
return absl::OkStatus();
}
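    // Main loop of each worker thread: wait for input arguments, build a
    // nested iterator from them with the captured function, then pull
    // elements into the worker's output buffer, blocking while the buffer
    // already holds `buffer_output_elements` elements. `ckpt_mu_` is taken in
    // shared mode around state updates so checkpointing sees a consistent
    // per-thread snapshot.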
void WorkerThread(const std::shared_ptr<IteratorContext>& ctx,
const int64_t thread_index) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, thread_index, ctx] {
mutex_lock l(mu_);
workers_[thread_index].cond_var.notify_all();
RecordStop(ctx.get());
});
bool make_new_iterator;
{
tf_shared_lock l(ckpt_mu_);
make_new_iterator =
worker_thread_states_[thread_index].iterator == nullptr &&
worker_thread_states_[thread_index].iterator_creation_status.ok();
}
bool thread_potentially_in_staging = true;
while (true) {
Status iterator_creation_status;
if (make_new_iterator) {
bool read_new_input;
{
tf_shared_lock l(ckpt_mu_);
read_new_input = worker_thread_states_[thread_index].input.empty();
}
if (read_new_input) {
mutex_lock l(mu_);
while (!cancelled_ && !workers_[thread_index].is_producing) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
worker_thread_states_[thread_index].input.swap(
workers_[thread_index].input);
}
{
mutex_lock l(mu_);
thread_potentially_in_staging =
absl::c_find(staging_indices_, thread_index) !=
staging_indices_.end();
}
{
tf_shared_lock l(ckpt_mu_);
worker_thread_states_[thread_index].iterator_creation_status =
MakeIteratorFromInputElement(
ctx.get(), this, worker_thread_states_[thread_index].input,
thread_index, *instantiated_captured_func_, prefix(),
&worker_thread_states_[thread_index].iterator,
model_node());
iterator_creation_status =
worker_thread_states_[thread_index].iterator_creation_status;
if (!iterator_creation_status.ok()) {
worker_thread_states_[thread_index].input.clear();
} else if (thread_potentially_in_staging) {
DisableAutotune(
ctx.get(),
worker_thread_states_[thread_index].iterator.get());
}
}
} else {
tf_shared_lock l(ckpt_mu_);
iterator_creation_status =
worker_thread_states_[thread_index].iterator_creation_status;
make_new_iterator = true;
}
if (!iterator_creation_status.ok()) {
mutex_lock l(mu_);
while (!cancelled_ && workers_[thread_index].outputs.size() ==
dataset()->buffer_output_elements_) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
workers_[thread_index].outputs.emplace_back(iterator_creation_status);
workers_[thread_index].is_producing = false;
worker_thread_states_[thread_index].iterator_creation_status =
absl::OkStatus();
if (deterministic_) {
workers_[thread_index].cond_var.notify_one();
} else {
any_element_available_cond_var_.notify_one();
}
} else {
bool end_of_sequence = false;
while (!end_of_sequence) {
if (thread_potentially_in_staging) {
mutex_lock l(mu_);
thread_potentially_in_staging =
absl::c_find(staging_indices_, thread_index) !=
staging_indices_.end();
if (!thread_potentially_in_staging) {
tf_shared_lock l(ckpt_mu_);
EnableAutotune(
ctx.get(),
worker_thread_states_[thread_index].iterator.get());
}
}
{
tf_shared_lock ckpt_l(ckpt_mu_);
if (worker_thread_states_[thread_index].output_elem.status.ok() &&
worker_thread_states_[thread_index]
.output_elem.output.empty() &&
!worker_thread_states_[thread_index].end_of_sequence) {
int64_t& id =
worker_thread_states_[thread_index].output_elem.id;
tsl::profiler::TraceMe traceme(
[&] {
id = tsl::profiler::TraceMe::NewActivityId();
return tsl::profiler::TraceMeEncode(
"ParallelInterleaveProduce", {{"element_id", id}});
},
profiler::kInfo);
worker_thread_states_[thread_index].output_elem.status =
worker_thread_states_[thread_index].iterator->GetNext(
ctx.get(),
&worker_thread_states_[thread_index].output_elem.output,
&worker_thread_states_[thread_index].end_of_sequence);
end_of_sequence =
worker_thread_states_[thread_index].end_of_sequence;
} else {
end_of_sequence =
worker_thread_states_[thread_index].end_of_sequence;
}
}
{
mutex_lock l(mu_);
while (!cancelled_ && workers_[thread_index].outputs.size() ==
dataset()->buffer_output_elements_) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
workers_[thread_index].is_producing = !end_of_sequence;
if (end_of_sequence) {
worker_thread_states_[thread_index].iterator.reset();
worker_thread_states_[thread_index].input.clear();
worker_thread_states_[thread_index].end_of_sequence = false;
} else {
workers_[thread_index].outputs.emplace_back(
worker_thread_states_[thread_index].output_elem.status,
worker_thread_states_[thread_index].output_elem.id);
workers_[thread_index].outputs.back().output.swap(
worker_thread_states_[thread_index].output_elem.output);
}
worker_thread_states_[thread_index].output_elem.status =
absl::OkStatus();
if (deterministic_) {
workers_[thread_index].cond_var.notify_one();
} else {
any_element_available_cond_var_.notify_one();
}
}
}
}
}
}
Status WriteWorkerStateLocked(IteratorStateWriter* writer, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string iterator_name =
strings::StrCat(prefix(), "::", kWorker, "_", index);
TF_RETURN_IF_ERROR(writer->WriteScalar(iterator_name, kInputSize,
workers_[index].input.size()));
for (int i = 0; i < workers_[index].input.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteTensor(iterator_name,
strings::StrCat(kInput, "_", i),
workers_[index].input[i]));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(iterator_name, kOutputsSize,
workers_[index].outputs.size()));
for (int i = 0; i < workers_[index].outputs.size(); ++i) {
TF_RETURN_IF_ERROR(WriteOutputElemLocked(
writer, workers_[index].outputs[i], iterator_name,
strings::StrCat(kOutputs, "_", i)));
}
if (workers_[index].is_producing) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kIsProducing, ""));
}
return absl::OkStatus();
}
Status ReadWorkerStateLocked(IteratorContext* ctx,
IteratorStateReader* reader, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string worker_prefix =
strings::StrCat(prefix(), "::", kWorker, "_", index);
int64_t input_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kInputSize, &input_size));
workers_[index].input.reserve(input_size);
for (int i = 0; i < input_size; ++i) {
workers_[index].input.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), worker_prefix,
strings::StrCat(kInput, "_", i),
&workers_[index].input.back()));
}
int64_t outputs_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kOutputsSize, &outputs_size));
for (int i = 0; i < outputs_size; ++i) {
workers_[index].outputs.emplace_back(absl::OkStatus());
TF_RETURN_IF_ERROR(ReadOutputElemLocked(
ctx, reader, &workers_[index].outputs.back(), worker_prefix,
strings::StrCat(kOutputs, "_", i)));
}
if (reader->Contains(worker_prefix, kIsProducing)) {
workers_[index].is_producing = true;
} else {
workers_[index].is_producing = false;
}
return absl::OkStatus();
}
Status WriteWorkerThreadStateLocked(SerializationContext* ctx,
IteratorStateWriter* writer, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string iterator_name =
strings::StrCat(prefix(), "::", kWorkerThread, "_", index);
if (worker_thread_states_[index].iterator != nullptr) {
TF_RETURN_IF_ERROR(
SaveInput(ctx, writer, worker_thread_states_[index].iterator));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kIteratorExhausted, ""));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kInputSize,
worker_thread_states_[index].input.size()));
for (int i = 0; i < worker_thread_states_[index].input.size(); ++i) {
TF_RETURN_IF_ERROR(
writer->WriteTensor(iterator_name, strings::StrCat(kInput, "_", i),
worker_thread_states_[index].input[i]));
}
TF_RETURN_IF_ERROR(WriteStatusLocked(
writer, iterator_name, kIteratorCreationStatus,
worker_thread_states_[index].iterator_creation_status));
TF_RETURN_IF_ERROR(WriteOutputElemLocked(
writer, worker_thread_states_[index].output_elem, iterator_name,
kOutput));
if (worker_thread_states_[index].end_of_sequence) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kEndOfSequence, ""));
}
return absl::OkStatus();
}
Status ReadWorkerThreadStateLocked(IteratorContext* ctx,
IteratorStateReader* reader, int index,
WorkerThreadState* state) {
string worker_prefix =
strings::StrCat(prefix(), "::", kWorkerThread, "_", index);
int64_t input_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kInputSize, &input_size));
state->input.reserve(input_size);
for (int i = 0; i < input_size; ++i) {
state->input.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), worker_prefix,
strings::StrCat(kInput, "_", i),
&state->input.back()));
}
if (reader->Contains(worker_prefix, kIteratorExhausted)) {
state->iterator.reset();
} else {
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, state->input, index, *instantiated_captured_func_,
prefix(), &iterator, nullptr));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
state->iterator.swap(iterator);
}
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, worker_prefix,
kIteratorCreationStatus,
&state->iterator_creation_status));
TF_RETURN_IF_ERROR(ReadOutputElemLocked(ctx, reader, &state->output_elem,
worker_prefix, kOutput));
if (reader->Contains(worker_prefix, kEndOfSequence)) {
state->end_of_sequence = true;
} else {
state->end_of_sequence = false;
}
return absl::OkStatus();
}
Status WriteOutputElemLocked(IteratorStateWriter* writer,
const OutputElem& output_elem,
const string& iterator_name,
const string& prefix)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
TF_RETURN_IF_ERROR(WriteStatusLocked(
writer, iterator_name, strings::StrCat(prefix, "_", kStatus),
output_elem.status));
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", kOutputSize),
output_elem.output.size()));
for (int i = 0; i < output_elem.output.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
iterator_name, strings::StrCat(prefix, "_", kOutput, "_", i),
output_elem.output[i]));
}
return absl::OkStatus();
}
Status ReadOutputElemLocked(IteratorContext* ctx,
IteratorStateReader* reader,
OutputElem* output_elem,
const string& iterator_name,
const string& prefix) {
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, iterator_name,
strings::StrCat(prefix, "_", kStatus),
&output_elem->status));
int64_t output_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", kOutputSize),
&output_size));
output_elem->output.reserve(output_size);
for (int i = 0; i < output_size; ++i) {
output_elem->output.emplace_back();
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), iterator_name,
strings::StrCat(prefix, "_", kOutput, "_", i),
&output_elem->output.back()));
}
return absl::OkStatus();
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const string& iterator_name, const string& prefix,
const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", kCode),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", KMessage),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader,
const string& iterator_name, const string& prefix,
Status* status) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", kCode), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", KMessage),
&error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
mutex mu_ TF_ACQUIRED_BEFORE(ckpt_mu_);
condition_variable any_element_available_cond_var_;
const bool deterministic_;
mutex ckpt_mu_;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::vector<WorkerState> workers_ TF_GUARDED_BY(mu_);
std::vector<WorkerThreadState> worker_thread_states_
TF_GUARDED_BY(ckpt_mu_);
std::vector<int64_t> interleave_indices_ TF_GUARDED_BY(mu_);
std::deque<int64_t> staging_indices_ TF_GUARDED_BY(mu_);
size_t next_index_ TF_GUARDED_BY(mu_) = 0;
size_t block_count_ TF_GUARDED_BY(mu_) = 0;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
std::vector<std::unique_ptr<Thread>> worker_threads_ TF_GUARDED_BY(mu_);
std::function<void()> deregister_fn_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const int64_t cycle_length_;
const int64_t block_length_;
const DeterminismPolicy deterministic_;
const int64_t buffer_output_elements_;
const int64_t prefetch_input_elements_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
const int op_version_;
};
ParallelInterleaveDatasetOp::ParallelInterleaveDatasetOp(
OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->HasAttr(kDeterministic) ? 2 : 1) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
if (op_version_ == 2) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
}
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
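// Parses and validates the scalar inputs (`cycle_length` > 0, `block_length`
// > 0, `buffer_output_elements` > 0, `prefetch_input_elements` >= 0) and
// constructs the Dataset. For op version 1 the `sloppy` input selects the
// determinism policy; for version 2 it was already derived from the
// `deterministic` attr in the constructor.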
void ParallelInterleaveDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t cycle_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kCycleLength, &cycle_length));
OP_REQUIRES(ctx, cycle_length > 0,
errors::InvalidArgument("`cycle_length` must be > 0"));
int64_t block_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBlockLength, &block_length));
OP_REQUIRES(ctx, block_length > 0,
errors::InvalidArgument("`block_length` must be > 0"));
if (op_version_ == 1) {
bool sloppy = false;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kSloppy, &sloppy));
if (sloppy) {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kNondeterministic);
} else {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kDeterministic);
}
}
int64_t buffer_output_elements = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBufferOutputElements,
&buffer_output_elements));
OP_REQUIRES(ctx, buffer_output_elements > 0,
errors::InvalidArgument("`buffer_output_elements` must be > 0"));
int64_t prefetch_input_elements = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPrefetchInputElements,
&prefetch_input_elements));
OP_REQUIRES(
ctx, prefetch_input_elements >= 0,
errors::InvalidArgument("`prefetch_input_elements` must be >= 0"));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), cycle_length,
block_length, deterministic_, buffer_output_elements,
prefetch_input_elements, output_types_, output_shapes_,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelInterleaveDataset").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalParallelInterleaveDataset").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("LegacyParallelInterleaveDatasetV2").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ParallelInterleaveDataset");
REGISTER_INPUT_COLOCATION_EXEMPTION("ExperimentalParallelInterleaveDataset");
REGISTER_INPUT_COLOCATION_EXEMPTION("LegacyParallelInterleaveDatasetV2");
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "parallel_interleave_dataset";
constexpr int kOpVersion = 2;
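// Test parameters for ParallelInterleaveDataset: wraps a TensorSliceDataset
// input together with the interleave arguments (cycle/block length,
// determinism policy, buffer_output_elements, prefetch_input_elements) and
// the map function used to build each nested dataset.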
class ParallelInterleaveDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelInterleaveDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int64_t cycle_length, int64_t block_length,
const std::string& deterministic, int64_t buffer_output_elements,
int64_t prefetch_input_elements, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {block_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {buffer_output_elements_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {prefetch_input_elements_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelInterleaveDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelInterleaveDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelInterleaveDatasetOp::kCycleLength);
input_names->emplace_back(ParallelInterleaveDatasetOp::kBlockLength);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kBufferOutputElements);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kPrefetchInputElements);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"deterministic", deterministic_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelInterleaveDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int64_t cycle_length_;
int64_t block_length_;
std::string deterministic_;
int64_t buffer_output_elements_;
int64_t prefetch_input_elements_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class ParallelInterleaveDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
return FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{TensorSliceDatasetOp::kToutputTypes, output_types},
{TensorSliceDatasetOp::kOutputShapes, output_shapes}});
}
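// Each helper below slices a 3 x 3 x 1 input tensor into three row datasets
// and interleaves them with a different combination of cycle/block length,
// determinism policy, and buffer sizes; the Invalid* helpers exercise the
// argument validation paths.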
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
1,
DeterminismPolicy::kDeterministic,
1,
0,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
3,
1,
DeterminismPolicy::kNondeterministic,
3,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
5,
1,
DeterminismPolicy::kNondeterministic,
1,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(
TensorShape{3, 3, 1}, {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kDeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams EmptyInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{Tensor{}},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kNondeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_FLOAT}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_FLOAT},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidCycleLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
0,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBlockLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
-1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBufferOutputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
0,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidPrefetchInputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
-1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<ParallelInterleaveDatasetParams>>
GetNextTestCases() {
return {{ParallelInterleaveDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false},
{EmptyInputParams(),
CreateTensors<tstring>(TensorShape{1}, {}),
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams, GetNextTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelInterleaveDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})}));
}
std::vector<CardinalityTestCase<ParallelInterleaveDatasetParams>>
CardinalityTestCases() {
return {{ParallelInterleaveDatasetParams1(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams2(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams3(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams4(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams5(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
CardinalityTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({1})}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelInterleaveDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelInterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelInterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, InvalidArguments) {
std::vector<ParallelInterleaveDatasetParams> invalid_params = {
InvalidCycleLengthParams(), InvalidBlockLengthParams(),
InvalidBufferOutputElementsParams(),
InvalidPrefetchInputElementsParams()};
for (auto& dataset_params : invalid_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d96664ba-3a28-4df3-85b0-d237c15b7457 | cpp | tensorflow/tensorflow | interleave_dataset_op | tensorflow/core/kernels/data/interleave_dataset_op.cc | tensorflow/core/kernels/data/interleave_dataset_op_test.cc | #include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include <algorithm>
#include <memory>
#include <optional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const InterleaveDatasetOp::kDatasetType;
constexpr const char* const InterleaveDatasetOp::kInputDataset;
constexpr const char* const InterleaveDatasetOp::kOtherArguments;
constexpr const char* const InterleaveDatasetOp::kCycleLength;
constexpr const char* const InterleaveDatasetOp::kBlockLength;
constexpr const char* const InterleaveDatasetOp::kFunc;
constexpr const char* const InterleaveDatasetOp::kTarguments;
constexpr const char* const InterleaveDatasetOp::kOutputTypes;
constexpr const char* const InterleaveDatasetOp::kOutputShapes;
constexpr char kCycleIndex[] = "cycle_index";
constexpr char kBlockIndex[] = "block_index";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumOpen[] = "num_open";
constexpr char kArgsSize[] = "args_size";
constexpr char kArgsList[] = "args_list_";
constexpr char kCurrentElementsUninitialized[] =
"current_elements_uninitialized";
constexpr char kNextInputElementIndex[] = "next_input_element_index";
constexpr char kLastCheckpointedInputElementIndex[] =
"last_checkpointed_input_element_index";
constexpr char kInputElementIndices[] = "input_element_indices";
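// Dataset for the sequential (non-parallel) interleave transformation: each
// input element is mapped through the captured function to a nested dataset,
// and the iterator cycles through up to `cycle_length` open nested datasets,
// taking `block_length` elements from each in turn on the calling thread.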
class InterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
int64_t block_length, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
cycle_length_(cycle_length),
block_length_(block_length),
output_types_(output_types),
output_shapes_(output_shapes),
traceme_metadata_(
{{"block_length",
strings::Printf("%lld", static_cast<long long>(block_length))},
{"cycle_length",
strings::Printf("%lld", static_cast<long long>(cycle_length))}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* cycle_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
Node* block_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_node}, {2, cycle_length_node}, {3, block_length_node}},
{{1, other_arguments}},
{{kFunc, f}, {kTarguments, other_arguments_types_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
current_elements_(params.dataset->cycle_length_) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
void AdvanceToNextInCycle() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
block_index_ = 0;
cycle_index_ = (cycle_index_ + 1) % dataset()->cycle_length_;
}
Status AdvancePosition(int num_elements) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
block_index_ += num_elements;
if (block_index_ == dataset()->block_length_) {
AdvanceToNextInCycle();
return absl::OkStatus();
} else if (block_index_ < dataset()->block_length_) {
return absl::OkStatus();
}
return absl::InternalError(
"Something went wrong as `block_index_` should never be larger than "
"`dataset()->block_length_`");
}
void AdvancePosition() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
++block_index_;
if (block_index_ == dataset()->block_length_) {
AdvanceToNextInCycle();
}
}
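    // Returns the next element from the iterator in the current cycle slot
    // and advances the block/cycle position. When a slot's nested iterator is
    // exhausted, its checkpoint state is purged, the slot is cleared, and a
    // new element is opened from the input via MoveToNextElement.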
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
while (!end_of_input_ || num_open_ > 0) {
if (current_elements_[cycle_index_]) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
CurrentElement& current_element = *current_elements_[cycle_index_];
TF_RETURN_IF_ERROR(current_element.iterator->GetNext(
&nested_ctx, out_tensors, &end_of_element));
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
AdvancePosition();
*end_of_sequence = false;
return absl::OkStatus();
} else {
ctx->PurgeCheckpoint(current_element.iterator->prefix());
UpdateSymbolicCheckpointAfterCurrentElementFinished(
*ctx, *current_elements_[cycle_index_]);
current_elements_[cycle_index_].reset();
--num_open_;
AdvanceToNextInCycle();
}
} else {
TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
}
}
ctx->MergeCheckpoint(input_ckpt_.get());
*end_of_sequence = true;
return absl::OkStatus();
}
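    // Skips up to `num_to_skip` elements, forwarding the skip to the nested
    // iterators in chunks of at most the remaining block length so the
    // cycle/block position stays consistent with GetNextInternal.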
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
mutex_lock l(mu_);
*num_skipped = 0;
while (!end_of_input_ || num_open_ > 0) {
if (current_elements_[cycle_index_]) {
CurrentElement& current_element = *current_elements_[cycle_index_];
int element_num_to_skip = num_to_skip - *num_skipped;
if (element_num_to_skip > dataset()->block_length_ - block_index_) {
element_num_to_skip = dataset()->block_length_ - block_index_;
}
bool end_of_element = false;
int element_num_skipped = 0;
auto nested_ctx = MakeNestedIteratorContext(ctx);
TF_RETURN_IF_ERROR(current_element.iterator->Skip(
&nested_ctx, element_num_to_skip, &end_of_element,
&element_num_skipped));
*num_skipped += element_num_skipped;
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
TF_RETURN_IF_ERROR(AdvancePosition(element_num_skipped));
} else {
ctx->PurgeCheckpoint(current_element.iterator->prefix());
UpdateSymbolicCheckpointAfterCurrentElementFinished(
*ctx, *current_elements_[cycle_index_]);
current_elements_[cycle_index_].reset();
--num_open_;
AdvanceToNextInCycle();
}
if (num_to_skip == *num_skipped) {
*end_of_sequence = false;
return absl::OkStatus();
}
} else {
TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
}
}
ctx->MergeCheckpoint(input_ckpt_.get());
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args), {model::MakeNonTunableParameter(
kCycleLength, dataset()->cycle_length_)});
}
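    // Checkpointing records the cycle/block position and, for every open
    // cycle slot, either the nested iterator state plus its input arguments
    // (explicit checkpoints) or only the offset of the input element it was
    // built from (symbolic checkpoints), from which the slot is rebuilt on
    // restore.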
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCycleIndex, cycle_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBlockIndex, block_index_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kEndOfInput, static_cast<int64_t>(end_of_input_)));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumOpen, num_open_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNextInputElementIndex,
next_input_element_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kLastCheckpointedInputElementIndex,
last_checkpointed_input_element_index_));
TF_RETURN_IF_ERROR(SaveCurrentElements(ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t cycle_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCycleIndex, &cycle_index));
cycle_index_ = size_t(cycle_index);
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBlockIndex, &block_index_));
int64_t end_of_input;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kEndOfInput, &end_of_input));
end_of_input_ = static_cast<bool>(end_of_input);
int64_t num_open;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumOpen, &num_open));
num_open_ = size_t(num_open);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextInputElementIndex,
&next_input_element_index_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kLastCheckpointedInputElementIndex,
&last_checkpointed_input_element_index_));
int64_t cycle_length = dataset()->cycle_length_;
std::vector<InputOffset> input_element_indices(cycle_length, -1);
std::vector<std::optional<MemoryCheckpoint>> checkpoints(cycle_length);
std::vector<std::vector<Tensor>> args(cycle_length);
if (ctx->symbolic_checkpoint()) {
auto status_or = RestoreInputOffsets(*reader);
if (!status_or.ok()) {
return status_or.status();
}
auto& input_offset_w_cycle_idxs = status_or.value();
TF_RETURN_IF_ERROR(RestoreArgsListAndInputOffsetCycleIdxMap(
*ctx, input_element_indices, checkpoints, args,
input_offset_w_cycle_idxs));
}
TF_RETURN_IF_ERROR(
RestoreCurrentElements(ctx, reader, input_element_indices,
std::move(checkpoints), std::move(args)));
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
using InputOffset = int64_t;
using CycleIdx = int;
struct CurrentElement;
struct InputOffsetWithCycleIdx;
int64_t GetSubIteratorIndexForPrefix(bool symbolic_checkpoint)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return GetSubIteratorIndexForPrefix(symbolic_checkpoint, cycle_index_,
next_input_element_index_);
}
int64_t GetSubIteratorIndexForPrefix(
bool symbolic_checkpoint, int64_t cycle_index,
std::optional<int64_t> input_element_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (symbolic_checkpoint) ? (input_element_index.value())
: (cycle_index);
}
Status SaveCurrentElements(SerializationContext* ctx,
IteratorStateWriter* writer)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (int idx = 0; idx < current_elements_.size(); idx++) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
!current_elements_[idx]));
if (!current_elements_[idx]) {
continue;
}
if (!ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(
SaveInput(ctx, writer, current_elements_[idx]->iterator));
const auto& args = current_elements_[idx]->args;
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
args.size()));
for (int i = 0; i < args.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
args[i]));
}
} else {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kInputElementIndices, "[", idx, "]"),
current_elements_[idx]->input_element_index));
}
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<InputOffsetWithCycleIdx>> RestoreInputOffsets(
IteratorStateReader& reader) {
std::vector<InputOffsetWithCycleIdx> input_offsets;
int64_t cycle_length = dataset()->cycle_length_;
for (int cycle_idx = 0; cycle_idx < cycle_length; cycle_idx++) {
int64_t current_element_uninitialized;
TF_RETURN_IF_ERROR(reader.ReadScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", cycle_idx, "]"),
&current_element_uninitialized));
if (!current_element_uninitialized) {
int64_t input_element_index;
TF_RETURN_IF_ERROR(reader.ReadScalar(
prefix(),
strings::StrCat(kInputElementIndices, "[", cycle_idx, "]"),
&input_element_index));
input_offsets.push_back(
InputOffsetWithCycleIdx{input_element_index, cycle_idx});
}
}
return std::move(input_offsets);
}
Status RestoreArgsListAndInputOffsetCycleIdxMap(
IteratorContext& ctx, std::vector<InputOffset>& input_element_indices,
std::vector<std::optional<MemoryCheckpoint>>& checkpoints,
std::vector<std::vector<Tensor>>& args,
std::vector<InputOffsetWithCycleIdx>& input_offset_w_cycle_idxs)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (input_element_indices.size() != dataset()->cycle_length_ ||
checkpoints.size() != dataset()->cycle_length_ ||
args.size() != dataset()->cycle_length_) {
return absl::FailedPreconditionError(
"input_element_indices, checkpoints and args should be of same "
"length");
}
std::sort(input_offset_w_cycle_idxs.begin(),
input_offset_w_cycle_idxs.end(),
[](const InputOffsetWithCycleIdx& lhs,
const InputOffsetWithCycleIdx& rhs) {
return lhs.input_element_index < rhs.input_element_index;
});
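// Replay the input in increasing input-offset order: skip the gap since the
// previously restored offset, then call GetNext() to regenerate the args and
// the per-element input checkpoint for each in-flight element.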
bool end_of_sequence = false;
int num_to_skip;
int num_actually_skip;
InputOffset prev_input_element_index =
last_checkpointed_input_element_index_;
auto input_ctx = std::make_unique<IteratorContext>(ctx);
for (const auto& input_offset_w_cycle_idx : input_offset_w_cycle_idxs) {
InputOffset input_element_index =
input_offset_w_cycle_idx.input_element_index;
CycleIdx cycle_idx = input_offset_w_cycle_idx.cycle_idx;
if (input_element_index >= next_input_element_index_) {
return absl::FailedPreconditionError(
"input_element_index < next_input_element_index_ must be "
"met.");
}
num_to_skip = input_element_index - prev_input_element_index - 1;
TF_RETURN_IF_ERROR(input_impl_->Skip(input_ctx.get(), num_to_skip,
&end_of_sequence,
&num_actually_skip));
if (end_of_sequence || num_actually_skip != num_to_skip) {
return absl::InternalError(
"Unexpected end of sequence while symbolically restoring "
"InterleaveDataset. Please verify that the input produces data "
"deterministically.");
}
std::vector<Tensor> current_element_args;
TF_RETURN_IF_ERROR(input_impl_->GetNext(
input_ctx.get(), &current_element_args, &end_of_sequence));
prev_input_element_index = input_element_index;
checkpoints[cycle_idx].emplace(*input_ctx->checkpoint());
args[cycle_idx] = std::move(current_element_args);
input_element_indices[cycle_idx] = input_element_index;
}
num_to_skip = next_input_element_index_ - prev_input_element_index - 1;
TF_RETURN_IF_ERROR(input_impl_->Skip(
input_ctx.get(), num_to_skip, &end_of_sequence, &num_actually_skip));
if (end_of_sequence || num_actually_skip != num_to_skip) {
return absl::InternalError(
"Unexpected end of sequence while symbolically restoring "
"InterleaveDataset. Please verify that the input produces data "
"deterministically.");
}
input_ckpt_->Merge(input_ctx->checkpoint());
return absl::OkStatus();
}
Status RestoreCurrentElements(
IteratorContext* ctx, IteratorStateReader* reader,
std::vector<InputOffset>& input_element_indices,
std::vector<std::optional<MemoryCheckpoint>>&& checkpoints,
std::vector<std::vector<Tensor>>&& args)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (int idx = 0; idx < current_elements_.size(); idx++) {
int64_t current_element_uninitialized;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
&current_element_uninitialized));
if (!current_element_uninitialized) {
if (!ctx->symbolic_checkpoint()) {
int64_t args_size;
std::vector<Tensor> current_element_args;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
&args_size));
current_element_args.resize(args_size);
for (int i = 0; i < args_size; i++) {
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix(),
strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
&current_element_args[i]));
}
args[idx] = std::move(current_element_args);
}
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, args[idx],
GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint(), idx,
input_element_indices[idx]),
*instantiated_captured_func_, prefix(), &iterator,
nullptr));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
current_elements_[idx].emplace(
std::move(checkpoints[idx]), std::move(args[idx]),
input_element_indices[idx], std::move(iterator));
} else {
current_elements_[idx].reset();
}
}
return absl::OkStatus();
}
Status MoveToNextElement(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!end_of_input_) {
IteratorContext input_ctx = MakeNestedIteratorContext(ctx);
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(
input_impl_->GetNext(&input_ctx, &args, &end_of_input_));
input_ckpt_->Merge(input_ctx.checkpoint());
if (!end_of_input_) {
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, args,
GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint()),
*instantiated_captured_func_, prefix(), &iterator, model_node()));
++num_open_;
std::optional<MemoryCheckpoint> checkpoint;
if (ctx->symbolic_checkpoint()) {
checkpoint.emplace(*input_ckpt_);
}
current_elements_[cycle_index_].emplace(
std::move(checkpoint), std::move(args), next_input_element_index_,
std::move(iterator));
next_input_element_index_++;
}
} else {
AdvanceToNextInCycle();
}
return absl::OkStatus();
}
InputOffset IsEarliestInputElementIndex(InputOffset input_element_index) {
InputOffset min_input_element_index = input_element_index;
for (int i = 0; i < current_elements_.size(); ++i) {
if (!current_elements_[i]) continue;
if (current_elements_[i]->input_element_index <
min_input_element_index) {
min_input_element_index = current_elements_[i]->input_element_index;
}
}
return (min_input_element_index == input_element_index);
}
void UpdateSymbolicCheckpointAfterCurrentElementFinished(
IteratorContext& ctx, CurrentElement& current_element)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!ctx.symbolic_checkpoint()) {
return;
}
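// Only merge this element's saved input checkpoint (and advance the
// last-checkpointed offset) when no other open element has a smaller input
// offset, i.e. this was the oldest in-flight input element.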
InputOffset input_element_index = current_element.input_element_index;
if (IsEarliestInputElementIndex(input_element_index)) {
MemoryCheckpoint& checkpoint =
const_cast<MemoryCheckpoint&>(current_element.checkpoint.value());
ctx.MergeCheckpoint(&checkpoint);
last_checkpointed_input_element_index_ = input_element_index;
}
}
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
struct CurrentElement {
const std::optional<MemoryCheckpoint> checkpoint = std::nullopt;
const InputOffset input_element_index = -1;
const std::vector<Tensor> args;
std::unique_ptr<IteratorBase> iterator = nullptr;
explicit CurrentElement(std::optional<MemoryCheckpoint>&& checkpoint,
std::vector<Tensor>&& args,
InputOffset input_element_index,
std::unique_ptr<IteratorBase> iterator)
: checkpoint(std::move(checkpoint)),
input_element_index(input_element_index),
args(std::move(args)),
iterator(std::move(iterator)) {}
CurrentElement(CurrentElement&& other) = default;
};
struct InputOffsetWithCycleIdx {
InputOffset input_element_index;
CycleIdx cycle_idx;
};
std::vector<std::optional<CurrentElement>> current_elements_;
InputOffset last_checkpointed_input_element_index_ TF_GUARDED_BY(mu_) = -1;
InputOffset next_input_element_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
size_t cycle_index_ TF_GUARDED_BY(mu_) = 0;
int64_t block_index_ TF_GUARDED_BY(mu_) = 0;
bool end_of_input_ TF_GUARDED_BY(mu_) = false;
size_t num_open_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const int64_t cycle_length_;
const int64_t block_length_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
};
InterleaveDatasetOp::InterleaveDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void InterleaveDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t cycle_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kCycleLength, &cycle_length));
if (cycle_length == model::kAutotune) {
cycle_length = port::MaxParallelism();
}
OP_REQUIRES(
ctx, cycle_length > 0,
errors::InvalidArgument("cycle_length must be greater than zero."));
int64_t block_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBlockLength, &block_length));
OP_REQUIRES(
ctx, block_length > 0,
errors::InvalidArgument("block_length must be greater than zero."));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), cycle_length,
block_length, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("InterleaveDataset").Device(DEVICE_CPU),
InterleaveDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("InterleaveDataset");
}
}
} | #include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "interleave_dataset";
class InterleaveDatasetParams : public DatasetParams {
public:
template <typename T>
InterleaveDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
int64_t cycle_length, int64_t block_length,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
cycle_length_(cycle_length),
block_length_(block_length),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {block_length_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size() + 2);
input_names->emplace_back(InterleaveDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(InterleaveDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(InterleaveDatasetOp::kCycleLength);
input_names->emplace_back(InterleaveDatasetOp::kBlockLength);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override {
return InterleaveDatasetOp::kDatasetType;
}
private:
std::vector<Tensor> other_arguments_;
int64_t cycle_length_;
int64_t block_length_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class InterleaveDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
return FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{"Toutput_types", output_types},
{"output_shapes", output_shapes}});
}
InterleaveDatasetParams InterleaveDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
3,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
5,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
3,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams7() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
5,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidCycleLength() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
0,
5,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidBlockLength() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
-1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<InterleaveDatasetParams>> GetNextTestCases() {
return {
{InterleaveDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams3(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams4(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams5(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams6(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams7(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}
ITERATOR_GET_NEXT_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<InterleaveDatasetParams>> SkipTestCases() {
return {{InterleaveDatasetParams1(),
0, 0, true,
CreateTensors<int64_t>(TensorShape({1}), {{0}})},
{InterleaveDatasetParams1(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{5}})},
{InterleaveDatasetParams1(),
10, 9},
{InterleaveDatasetParams2(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{5}})},
{InterleaveDatasetParams2(),
10, 9},
{InterleaveDatasetParams3(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{7}})},
{InterleaveDatasetParams3(),
10, 9},
{InterleaveDatasetParams4(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{7}})},
{InterleaveDatasetParams4(),
10, 9},
{InterleaveDatasetParams5(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"e"}})},
{InterleaveDatasetParams5(),
10, 9},
{InterleaveDatasetParams6(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
{InterleaveDatasetParams6(),
10, 9},
{InterleaveDatasetParams7(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
{InterleaveDatasetParams7(),
10, 9}};
}
ITERATOR_SKIP_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
SkipTestCases())
TEST_F(InterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(InterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(InterleaveDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<InterleaveDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{InterleaveDatasetParams1(),
{DT_INT64}},
{InterleaveDatasetParams2(),
{DT_INT64}},
{InterleaveDatasetParams3(),
{DT_INT64}},
{InterleaveDatasetParams4(),
{DT_INT64}},
{InterleaveDatasetParams5(),
{DT_STRING}},
{InterleaveDatasetParams6(),
{DT_STRING}},
{InterleaveDatasetParams7(),
{DT_STRING}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<InterleaveDatasetParams>>
DatasetOutputShapesTestCases() {
return {{InterleaveDatasetParams1(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams2(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams3(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams4(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams5(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams6(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams7(),
{PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<InterleaveDatasetParams>>
CardinalityTestCases() {
return {{InterleaveDatasetParams1(),
kUnknownCardinality},
{InterleaveDatasetParams2(),
kUnknownCardinality},
{InterleaveDatasetParams3(),
kUnknownCardinality},
{InterleaveDatasetParams4(),
kUnknownCardinality},
{InterleaveDatasetParams5(),
kUnknownCardinality},
{InterleaveDatasetParams6(),
kUnknownCardinality},
{InterleaveDatasetParams7(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<InterleaveDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{InterleaveDatasetParams1(),
{DT_INT64}},
{InterleaveDatasetParams2(),
{DT_INT64}},
{InterleaveDatasetParams3(),
{DT_INT64}},
{InterleaveDatasetParams4(),
{DT_INT64}},
{InterleaveDatasetParams5(),
{DT_STRING}},
{InterleaveDatasetParams6(),
{DT_STRING}},
{InterleaveDatasetParams7(),
{DT_STRING}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<InterleaveDatasetParams>>
IteratorOutputShapesTestCases() {
return {{InterleaveDatasetParams1(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams2(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams3(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams4(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams5(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams6(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams7(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(InterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
InterleaveDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<InterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{InterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams6(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams7(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(InterleaveDatasetOpTest,
InterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(InterleaveDatasetOpTest, InvalidCycleLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidCycleLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(InterleaveDatasetOpTest, InvalidLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidBlockLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/interleave_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/interleave_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e41b562-c8e5-4977-bc45-be1e3cf242f7 | cpp | tensorflow/tensorflow | text_line_dataset_op | tensorflow/core/kernels/data/text_line_dataset_op.cc | tensorflow/core/kernels/data/text_line_dataset_op_test.cc | #include "tensorflow/core/kernels/data/text_line_dataset_op.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
namespace data {
constexpr const char* const TextLineDatasetOp::kDatasetType;
constexpr const char* const TextLineDatasetOp::kFileNames;
constexpr const char* const TextLineDatasetOp::kCompressionType;
constexpr const char* const TextLineDatasetOp::kBufferSize;
constexpr char kZLIB[] = "ZLIB";
constexpr char kGZIP[] = "GZIP";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kCurrentPos[] = "current_pos";
class TextLineDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<string> filenames,
const string& compression_type,
const io::ZlibCompressionOptions& options)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
use_compression_(!compression_type.empty()),
options_(options) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this,
name_utils::IteratorPrefix(TextLineDatasetOp::kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
Node* compression_type = nullptr;
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
TF_RETURN_IF_ERROR(b->AddScalar(options_.input_buffer_size, &buffer_size));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {filenames, compression_type, buffer_size}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (buffered_input_stream_) {
Tensor line_contents(tstring{});
tstring& line_contents_str = line_contents.scalar<tstring>()();
Status s = buffered_input_stream_->ReadLine(&line_contents_str);
if (s.ok()) {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(
name_utils::OpName(TextLineDatasetOp::kDatasetType));
bytes_counter->IncrementBy(line_contents_str.size());
out_tensors->push_back(std::move(line_contents));
*end_of_sequence = false;
return absl::OkStatus();
} else if (!errors::IsOutOfRange(s)) {
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
if (buffered_input_stream_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentPos,
buffered_input_stream_->Tell()));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
ResetStreamsLocked();
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, &current_file_index));
current_file_index_ = size_t(current_file_index);
if (reader->Contains(prefix(), kCurrentPos)) {
int64_t current_pos;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentPos, &current_pos));
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
TF_RETURN_IF_ERROR(buffered_input_stream_->Seek(current_pos));
}
return absl::OkStatus();
}
private:
Status SetupStreamsLocked(Env* env) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (current_file_index_ >= dataset()->filenames_.size()) {
return errors::InvalidArgument(
"current_file_index_:", current_file_index_,
" >= filenames_.size():", dataset()->filenames_.size());
}
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
input_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get(), false);
if (dataset()->use_compression_) {
zlib_input_stream_ = std::make_unique<io::ZlibInputStream>(
input_stream_.get(), dataset()->options_.input_buffer_size,
dataset()->options_.input_buffer_size, dataset()->options_);
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
zlib_input_stream_.get(), dataset()->options_.input_buffer_size,
false);
} else {
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
input_stream_.get(), dataset()->options_.input_buffer_size, false);
}
return absl::OkStatus();
}
void ResetStreamsLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
input_stream_.reset();
zlib_input_stream_.reset();
buffered_input_stream_.reset();
file_.reset();
}
mutex mu_;
std::unique_ptr<io::RandomAccessInputStream> input_stream_
TF_GUARDED_BY(mu_);
std::unique_ptr<io::ZlibInputStream> zlib_input_stream_ TF_GUARDED_BY(mu_);
std::unique_ptr<io::BufferedInputStream> buffered_input_stream_
TF_GUARDED_BY(mu_);
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_
TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const tstring compression_type_;
const bool use_compression_;
const io::ZlibCompressionOptions options_;
};
TextLineDatasetOp::TextLineDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {}
void TextLineDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
tstring compression_type;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
int64_t buffer_size = -1;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size >= 0,
errors::InvalidArgument("`buffer_size` must be >= 0 (0 == default)"));
io::ZlibCompressionOptions zlib_compression_options =
io::ZlibCompressionOptions::DEFAULT();
if (compression_type == kZLIB) {
zlib_compression_options = io::ZlibCompressionOptions::DEFAULT();
} else if (compression_type == kGZIP) {
zlib_compression_options = io::ZlibCompressionOptions::GZIP();
} else {
OP_REQUIRES(ctx, compression_type.empty(),
errors::InvalidArgument("Unsupported compression_type."));
}
if (buffer_size != 0) {
zlib_compression_options.input_buffer_size = buffer_size;
}
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<tstring>()(i));
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
*output = new Dataset(ctx, std::move(filenames), compression_type,
zlib_compression_options);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TextLineDataset").Device(DEVICE_CPU),
TextLineDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/text_line_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "text_line_dataset";
tstring LocalTempFilename() {
std::string path;
CHECK(Env::Default()->LocalTempFilename(&path));
return tstring(path);
}
class TextLineDatasetParams : public DatasetParams {
public:
TextLineDatasetParams(std::vector<tstring> filenames,
CompressionType compression_type, int64_t buffer_size,
string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
buffer_size_(buffer_size) {}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {
TextLineDatasetOp::kFileNames,
TextLineDatasetOp::kCompressionType,
TextLineDatasetOp::kBufferSize,
};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return TextLineDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
CompressionType compression_type_;
int64_t buffer_size_;
};
class TextLineDatasetOpTest : public DatasetOpsTestBase {};
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<tstring>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(
WriteDataToFile(filenames[i], contents[i].data(), params));
}
return absl::OkStatus();
}
TextLineDatasetParams TextLineDatasetParams1() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
TextLineDatasetParams TextLineDatasetParams2() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
TextLineDatasetParams TextLineDatasetParams3() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
std::vector<GetNextTestCase<TextLineDatasetParams>> GetNextTestCases() {
return {{TextLineDatasetParams1(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams2(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams3(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})}};
}
ITERATOR_GET_NEXT_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams,
GetNextTestCases())
TEST_F(TextLineDatasetOpTest, DatasetNodeName) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TextLineDatasetOpTest, DatasetTypeString) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TextLineDatasetOp::kDatasetType)));
}
TEST_F(TextLineDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(TextLineDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(TextLineDatasetOpTest, Cardinality) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(TextLineDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(TextLineDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(TextLineDatasetOpTest, IteratorPrefix) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TextLineDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TextLineDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TextLineDatasetParams1(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams2(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams3(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/text_line_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/text_line_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b237aaf1-e2e0-46bd-9a9f-7c64978e4db3 | cpp | tensorflow/tensorflow | window_dataset_op | tensorflow/core/kernels/data/window_dataset_op.cc | tensorflow/core/kernels/data/window_dataset_op_test.cc | #include "tensorflow/core/kernels/data/window_dataset_op.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/data/window_dataset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const WindowDatasetOp::kDatasetType;
constexpr const char* const WindowDatasetOp::kInputDataset;
constexpr const char* const WindowDatasetOp::kSize;
constexpr const char* const WindowDatasetOp::kShift;
constexpr const char* const WindowDatasetOp::kStride;
constexpr const char* const WindowDatasetOp::kDropRemainder;
constexpr const char* const WindowDatasetOp::kOutputTypes;
constexpr const char* const WindowDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kBufferSize[] = "buffer_size";
constexpr char kBuffer[] = "buffer";
constexpr char kSizeSuffix[] = ".size";
constexpr char kCodeSuffix[] = ".code";
constexpr char kErrorMessage[] = ".error_message";
class WindowDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t window_size,
int64_t window_shift, int64_t window_stride, bool drop_remainder)
: DatasetBase(DatasetContext(ctx)),
input_(input),
window_size_(window_size),
window_shift_(window_shift),
window_stride_(window_stride),
drop_remainder_(drop_remainder),
output_dtypes_(input_->output_dtypes().size(), {DT_VARIANT}),
output_shapes_(input_->output_shapes().size(), TensorShape({})),
traceme_metadata_(
{{"window_size",
strings::Printf("%lld", static_cast<long long>(window_size))},
{"window_shift",
strings::Printf("%lld", static_cast<long long>(window_shift))},
{"window_stride", strings::Printf("%lld", static_cast<long long>(
window_stride))}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(window_size_, window_shift_, window_stride_,
drop_remainder_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
int64_t cardinality = 0;
if (drop_remainder_) {
int64_t rest_elements = n - ((window_size_ - 1) * window_stride_ + 1);
cardinality = rest_elements < 0 ? 0 : rest_elements / window_shift_ + 1;
} else {
cardinality = n / window_shift_ + (n % window_shift_ == 0 ? 0 : 1);
}
return cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* window_size_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size_node));
Node* window_shift_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_shift_, &window_shift_node));
Node* window_stride_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_stride_, &window_stride_node));
Node* drop_remainder_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, window_size_node, window_shift_node,
window_stride_node, drop_remainder_node},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64_t window_size = dataset()->window_size_;
const int64_t window_shift = dataset()->window_shift_;
const int64_t window_stride = dataset()->window_stride_;
std::vector<std::vector<Tensor>> window_elements;
Status status = absl::OkStatus();
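// Buffer enough input elements for one window, emit every `window_stride`-th
// buffered element as the window contents, then drop the first `window_shift`
// elements (consuming extra input if the shift reaches past the buffer).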
{
const size_t target_size = TargetBufferSize(window_size, window_stride);
mutex_lock l(mu_);
if (!input_impl_ &&
(buffer_.empty() ||
(dataset()->drop_remainder_ && buffer_.size() < target_size))) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (input_impl_) {
*end_of_sequence = false;
for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;
++i) {
std::vector<Tensor> element;
Status status =
input_impl_->GetNext(ctx, &element, end_of_sequence);
if (!*end_of_sequence) {
RecordBufferEnqueue(ctx, element);
buffer_.emplace_back(std::move(element), status);
} else {
input_impl_.reset();
}
}
}
if (buffer_.empty() ||
(dataset()->drop_remainder_ && buffer_.size() < target_size)) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
int num_elements = 1 + (buffer_.size() - 1) / window_stride;
window_elements.reserve(num_elements);
for (size_t i = 0; i < num_elements; ++i) {
status.Update(buffer_[window_stride * i].status);
if (!status.ok()) {
break;
}
window_elements.emplace_back(buffer_[window_stride * i].result);
}
int buffer_size = buffer_.size();
if (window_shift >= buffer_size) {
for (size_t i = buffer_size; input_impl_ && i < window_shift; ++i) {
bool end_of_input;
std::vector<Tensor> element;
input_impl_->GetNext(ctx, &element, &end_of_input).IgnoreError();
if (end_of_input) {
input_impl_.reset();
}
}
for (size_t i = 0; i < buffer_.size(); ++i) {
RecordBufferDequeue(ctx, buffer_.at(i).result);
}
buffer_.clear();
} else {
for (size_t i = 0; i < window_shift; ++i) {
RecordBufferDequeue(ctx, buffer_.at(i).result);
}
buffer_.erase(buffer_.begin(), buffer_.begin() + window_shift);
}
}
if (!status.ok()) {
return status;
}
const size_t num_tuple_components = window_elements[0].size();
const int64_t num_window_elements = window_elements.size();
*end_of_sequence = false;
for (size_t idx = 0; idx < num_tuple_components; ++idx) {
DatasetBase* window_dataset;
std::vector<std::vector<Tensor>> window_component_elements;
window_component_elements.reserve(num_window_elements);
for (size_t i = 0; i < num_window_elements; ++i) {
std::vector<Tensor> component_element;
component_element.push_back(std::move(window_elements[i][idx]));
window_component_elements.push_back(component_element);
}
DataTypeVector output_types({dataset()->input_->output_dtypes()[idx]});
std::vector<PartialTensorShape> output_shapes(
{dataset()->input_->output_shapes()[idx]});
TF_RETURN_IF_ERROR(NewWindow(window_component_elements, output_types,
output_shapes, &window_dataset));
out_tensors->emplace_back(DT_VARIANT, TensorShape({}));
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(window_dataset, &out_tensors->back()));
}
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
dataset()->window_shift_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (!input_impl_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputImplEmpty, ""));
} else {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBufferSize, buffer_.size()));
for (int64_t i = 0; i < buffer_.size(); i++) {
TF_RETURN_IF_ERROR(WriteStatusLocked(writer, i, buffer_[i].status));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kBuffer, "[", i, "]", kSizeSuffix),
buffer_[i].result.size()));
for (int64_t j = 0; j < buffer_[i].result.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kBuffer, "[", i, "][", j, "]"),
buffer_[i].result[j]));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (!reader->Contains(prefix(), kInputImplEmpty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t buffer_size = 0;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBufferSize, &buffer_size));
buffer_.resize(buffer_size);
for (int64_t i = 0; i < buffer_size; i++) {
int64_t vector_size;
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, i, &buffer_[i].status));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kBuffer, "[", i, "]", kSizeSuffix),
&vector_size));
buffer_[i].result.resize(vector_size);
for (int64_t j = 0; j < vector_size; j++) {
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), prefix(),
strings::StrCat(kBuffer, "[", i, "][", j, "]"),
&buffer_[i].result[j]));
}
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
struct InvocationResult {
InvocationResult() = default;
InvocationResult(std::vector<Tensor>&& result, const Status& status)
: result(result), status(status) {}
std::vector<Tensor> result;
Status status;
};
Status WriteStatusLocked(IteratorStateWriter* writer, size_t index,
const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), CodeKey(index), static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), ErrorMessageKey(index),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader, size_t index,
Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), CodeKey(index), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), ErrorMessageKey(index),
&error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
string CodeKey(size_t index) {
return strings::StrCat(kBuffer, "[", index, "]", kCodeSuffix);
}
string ErrorMessageKey(size_t index) {
return strings::StrCat(kBuffer, "[", index, "]", kErrorMessage);
}
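// One window of `window_size` elements taken every `window_stride` positions
// spans (window_size - 1) * window_stride + 1 buffered input elements.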
size_t TargetBufferSize(int64_t window_size, int64_t window_stride) {
return (window_size - 1) * window_stride + 1;
}
mutex mu_;
std::deque<InvocationResult> buffer_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
const int64_t window_size_;
const int64_t window_shift_;
const int64_t window_stride_;
const bool drop_remainder_;
const DataTypeVector output_dtypes_;
const std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
};
WindowDatasetOp::WindowDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void WindowDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t window_size = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSize, &window_size));
OP_REQUIRES(
ctx, window_size > 0,
errors::InvalidArgument("Window size must be greater than zero."));
int64_t window_shift = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kShift, &window_shift));
OP_REQUIRES(
ctx, window_shift > 0,
errors::InvalidArgument("Window shift must be greater than zero."));
int64_t window_stride = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kStride, &window_stride));
OP_REQUIRES(
ctx, window_stride > 0,
errors::InvalidArgument("Window stride must be greater than zero."));
bool drop_remainder;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
*output = new Dataset(ctx, input, window_size, window_shift, window_stride,
drop_remainder);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("WindowDataset").Device(DEVICE_CPU),
WindowDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/window_dataset_op.h"
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "window_dataset";
class WindowDatasetParams : public DatasetParams {
public:
template <typename T>
WindowDatasetParams(T input_dataset_params, int64_t size, int64_t shift,
int64_t stride, bool drop_remainder,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
size_(size),
shift_(shift),
stride_(stride),
drop_remainder_(drop_remainder) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {size_}),
CreateTensor<int64_t>(TensorShape({}), {shift_}),
CreateTensor<int64_t>(TensorShape({}), {stride_}),
CreateTensor<bool>(TensorShape({}), {drop_remainder_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(WindowDatasetOp::kInputDataset);
input_names->emplace_back(WindowDatasetOp::kSize);
input_names->emplace_back(WindowDatasetOp::kShift);
input_names->emplace_back(WindowDatasetOp::kStride);
input_names->emplace_back(WindowDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return WindowDatasetOp::kDatasetType; }
private:
int64_t size_;
int64_t shift_;
int64_t stride_;
bool drop_remainder_;
};
class WindowDatasetOpTest : public DatasetOpsTestBase {};
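// WindowDatasetParams1: windows of size 2, shift 2, stride 1 over range(0, 7)
// with drop_remainder = false, so the expected windows are {0,1}, {2,3},
// {4,5} and the partial window {6} (see GetNextTestCases below).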
WindowDatasetParams WindowDatasetParams1() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams2() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams3() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
8,
3,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams4() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
8,
3,
1,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams5() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
8,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams6() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
8,
1,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams7() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
8,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams8() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
8,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams9() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
4,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams10() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
5,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamsWithInvalidWindowSize() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
0,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamsWithInvalidWindowShift() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
0,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamsWithInvalidWindowStride() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
0,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
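// The three factories above set window size, shift, or stride to 0, which
// MakeDataset rejects with InvalidArgument; they are exercised by the
// InvalidArguments test at the bottom of this file.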
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<GetNextTestCase<WindowDatasetParams>> GetNextTestCases() {
return {{WindowDatasetParams1(),
{CreateTensors<int64_t>(TensorShape{}, {{0}, {1}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {3}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}}),
CreateTensors<int64_t>(TensorShape{}, {{6}})}},
{WindowDatasetParams2(),
{CreateTensors<int64_t>(TensorShape{}, {{0}, {2}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {4}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {6}})}},
{WindowDatasetParams3(),
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams4(),
{}},
{WindowDatasetParams5(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams6(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams7(),
{CreateTensors<int64_t>(TensorShape({}), {{0}}),
CreateTensors<int64_t>(TensorShape({}), {{2}}),
CreateTensors<int64_t>(TensorShape({}), {{4}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams8(),
{}},
{WindowDatasetParams9(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}})}},
{WindowDatasetParams10(),
{}}};
}
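// Each expected output in GetNextTestCases is the flattened contents of one
// window: the parameterized test below opens every emitted window dataset
// with a nested iterator and compares its elements against these tensors.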
class ParameterizedGetNextTest : public WindowDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<WindowDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
auto expected_outputs_it = test_case.expected_outputs.begin();
while (!end_of_sequence) {
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& window_dataset_tensor : out_tensors) {
DatasetBase* window_dataset;
TF_ASSERT_OK(GetDatasetFromVariantTensor(window_dataset_tensor,
&window_dataset));
std::unique_ptr<IteratorBase> window_dataset_iterator;
TF_ASSERT_OK(window_dataset->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(),
&window_dataset_iterator));
bool end_of_window_dataset = false;
std::vector<Tensor> window_elements;
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
window_elements.insert(window_elements.end(), next_element.begin(),
next_element.end());
}
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(window_elements, *expected_outputs_it, false));
expected_outputs_it++;
}
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(WindowDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(WindowDatasetOpTest, DatasetTypeString) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(WindowDatasetOp::kDatasetType)));
}
TEST_F(WindowDatasetOpTest, DatasetNodeName) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(WindowDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(WindowDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<WindowDatasetParams>>
DatasetCardinalityTestCases() {
return {{WindowDatasetParams1(),
4},
{WindowDatasetParams2(),
3},
{WindowDatasetParams3(),
3},
{WindowDatasetParams4(),
0},
{WindowDatasetParams5(),
1},
{WindowDatasetParams6(),
1},
{WindowDatasetParams7(),
4},
{WindowDatasetParams8(),
0},
{WindowDatasetParams9(),
1},
{WindowDatasetParams10(),
0}};
}
DATASET_CARDINALITY_TEST_P(WindowDatasetOpTest, WindowDatasetParams,
DatasetCardinalityTestCases())
TEST_F(WindowDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(WindowDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(WindowDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
WindowDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<WindowDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{WindowDatasetParams1(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape{}, {{0}, {1}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {3}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}}),
CreateTensors<int64_t>(TensorShape{}, {{6}})}},
{WindowDatasetParams2(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape{}, {{0}, {2}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {4}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {6}})}},
{WindowDatasetParams3(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams4(),
{0, 1, 9},
{}},
{WindowDatasetParams5(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams6(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams7(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}}),
CreateTensors<int64_t>(TensorShape({}), {{2}}),
CreateTensors<int64_t>(TensorShape({}), {{4}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams8(),
{0, 1, 9},
{}},
{WindowDatasetParams9(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}})}},
{WindowDatasetParams10(),
{0, 1, 9},
{}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public WindowDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<WindowDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
std::unique_ptr<SerializationContext> window_serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&window_serialization_ctx));
bool end_of_sequence = false;
auto expected_outputs_it = test_case.expected_outputs.begin();
int cur_iteration = 0;
for (int breakpoint : test_case.breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
while (!end_of_sequence) {
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& window_dataset_tensor : out_tensors) {
DatasetBase* window_dataset;
TF_ASSERT_OK(GetDatasetFromVariantTensor(window_dataset_tensor,
&window_dataset));
std::unique_ptr<IteratorBase> window_dataset_iterator;
TF_ASSERT_OK(window_dataset->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(),
&window_dataset_iterator));
bool end_of_window_dataset = false;
std::vector<Tensor> window_elements;
VariantTensorDataWriter window_dataset_writer;
std::vector<const VariantTensorData*> window_dataset_data;
TF_EXPECT_OK(window_dataset_iterator->Save(
window_serialization_ctx.get(), &window_dataset_writer));
window_dataset_writer.GetData(&window_dataset_data);
VariantTensorDataReader window_reader(window_dataset_data);
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
}
TF_EXPECT_OK(
RestoreIterator(iterator_ctx_.get(), &window_reader,
test_case.dataset_params.iterator_prefix(),
*window_dataset, &window_dataset_iterator));
end_of_window_dataset = false;
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
window_elements.insert(window_elements.end(),
next_element.begin(), next_element.end());
}
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(
ExpectEqual(window_elements, *expected_outputs_it, false));
expected_outputs_it++;
}
}
}
cur_iteration++;
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(WindowDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(WindowDatasetOpTest, InvalidArguments) {
std::vector<WindowDatasetParams> dataset_params_vec(
{WindowDatasetParamsWithInvalidWindowSize(),
WindowDatasetParamsWithInvalidWindowShift(),
WindowDatasetParamsWithInvalidWindowStride()});
for (const auto& dataset_params : dataset_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/window_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/window_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a516fea-af24-401e-b978-de5e7f147671 | cpp | tensorflow/tensorflow | tensor_dataset_op | tensorflow/core/kernels/data/tensor_dataset_op.cc | tensorflow/core/kernels/data/tensor_dataset_op_test.cc | #include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorDatasetOp::kDatasetType;
constexpr const char* const TensorDatasetOp::kComponents;
constexpr const char* const TensorDatasetOp::kToutput_types;
constexpr const char* const TensorDatasetOp::kOutputShapes;
constexpr char kFromTensor[] = "FromTensor";
constexpr char kProduced[] = "produced";
class TensorDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors)
: DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) {
dtypes_.reserve(tensors_.size());
shapes_.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
shapes_.emplace_back(t.shape().dim_sizes());
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kFromTensor, prefix)});
}
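// A TensorDataset holds exactly one element, so splitting is represented by a
// single-index IndexSplitProvider.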
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(1));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return 1LL;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
*out_tensors = tensors_;
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutput_types, dtypes}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
produced_(false),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (!ctx->split_providers().empty()) {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
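// GetNextInternal yields the stored tensors exactly once. Under global
// shuffling it delegates to GlobalShuffleIterator; when a split provider is
// present, the single split is consumed first, so an iterator whose provider
// yields no split (e.g. an off-shard worker) reports end of sequence without
// producing the element.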
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
mutex_lock l(mu_);
if (split_provider_) {
bool end_of_splits;
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
produced_ = true;
}
}
if (!produced_) {
*out_tensors = dataset()->tensors_;
produced_ = true;
*end_of_sequence = false;
return absl::OkStatus();
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kProduced,
static_cast<int64_t>(produced_)));
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
mutex_lock l(mu_);
int64_t produced;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kProduced, &produced));
produced_ = static_cast<bool>(produced);
return absl::OkStatus();
}
private:
mutex mu_;
std::shared_ptr<SplitProvider> split_provider_;
bool produced_ TF_GUARDED_BY(mu_);
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<PartialTensorShape> shapes_;
};
TensorDatasetOp::TensorDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutput_types, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void TensorDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components(inputs.begin(), inputs.end());
*output = new Dataset(ctx, std::move(components));
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorDataset").Device(DEVICE_CPU),
TensorDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_dataset";
class TensorDatasetParams : public DatasetParams {
public:
TensorDatasetParams(std::vector<Tensor> components, string node_name)
: DatasetParams(TensorDtypes(components), TensorShapes(components),
std::move(node_name)),
components_(std::move(components)) {}
std::vector<Tensor> GetInputTensors() const override { return components_; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(components_.size());
for (int i = 0; i < components_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(TensorDatasetOp::kComponents, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"Toutput_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return TensorDatasetOp::kDatasetType; }
private:
DataTypeVector TensorDtypes(const std::vector<Tensor>& input_components) {
DataTypeVector dtypes;
for (const auto& component : input_components) {
dtypes.emplace_back(component.dtype());
}
return dtypes;
}
std::vector<PartialTensorShape> TensorShapes(
const std::vector<Tensor>& input_components) {
std::vector<PartialTensorShape> shapes;
for (const auto& component : input_components) {
shapes.emplace_back(component.shape());
}
return shapes;
}
public:
std::vector<Tensor> components_;
};
class TensorDatasetOpTest : public DatasetOpsTestBase {};
std::vector<Tensor> PlainTensors() {
return {CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3}),
CreateTensor<double>(TensorShape({}), {37.0}),
CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})};
}
TensorDatasetParams PlainTensorDatasetParams() {
return {PlainTensors(),
kNodeName};
}
TensorDatasetParams NestedTensorDatasetParams() {
return {
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})},
kNodeName};
}
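// NestedTensorDatasetParams wraps tensors inside DT_VARIANT scalars; the
// tests below unwrap them with scalar<Variant>()().get<Tensor>() before
// comparing values.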
std::vector<GetNextTestCase<TensorDatasetParams>> GetNextTestCases() {
return {{PlainTensorDatasetParams(),
PlainTensors()},
{NestedTensorDatasetParams(),
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedGetNextTest : public TensorDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
EXPECT_TRUE(end_of_sequence);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorDatasetOp::kDatasetType)));
}
TEST_F(TensorDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, Cardinality) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(1));
}
TEST_F(TensorDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
"FromTensor", dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PlainTensorDatasetParams(),
{0, 1, 2},
PlainTensors()},
{NestedTensorDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
int cardinality = 1;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
if (breakpoint >= cardinality) {
EXPECT_TRUE(end_of_sequence);
} else {
EXPECT_FALSE(end_of_sequence);
}
}
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(TensorDatasetOpTest, Splitting) {
auto params = PlainTensorDatasetParams();
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, PlainTensors()));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 2,
CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 0,
PlainTensors()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b477b1b-3671-4769-aaca-1b58b7b1bac7 | cpp | tensorflow/tensorflow | prefetch_dataset_op | tensorflow/core/kernels/data/prefetch_dataset_op.cc | tensorflow/core/kernels/data/prefetch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/mutex.h"
namespace tensorflow {
namespace data {
constexpr const char* const PrefetchDatasetOp::kDatasetType;
constexpr const char* const PrefetchDatasetOp::kInputDataset;
constexpr const char* const PrefetchDatasetOp::kBufferSize;
constexpr const char* const PrefetchDatasetOp::kOutputTypes;
constexpr const char* const PrefetchDatasetOp::kOutputShapes;
constexpr const char* const PrefetchDatasetOp::kSlackPeriod;
constexpr const char* const PrefetchDatasetOp::kLegacyAutotune;
constexpr const char* const PrefetchDatasetOp::kBufferSizeMin;
namespace {
constexpr double kSleepFactor = 0.2;
constexpr char kBuffer[] = "buffer";
constexpr char kStatus[] = "status";
constexpr char kSizeSuffix[] = ".size";
constexpr char kCodeSuffix[] = ".code";
constexpr char kErrorMessageSuffix[] = ".error_message";
}
class PrefetchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t slack_period, bool legacy_autotune, int64_t buffer_size_min)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
AttrValue slack_period_attr;
b->BuildAttrValue(slack_period_, &slack_period_attr);
AttrValue legacy_autotune_attr;
b->BuildAttrValue(legacy_autotune_, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
b->BuildAttrValue(buffer_size_min_, &buffer_size_min_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, buffer_size},
{std::make_pair(kSlackPeriod, slack_period_attr),
std::make_pair(kLegacyAutotune, legacy_autotune_attr),
std::make_pair(kBufferSizeMin, buffer_size_min_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
buffer_size_min_(params.dataset->buffer_size_min_),
legacy_autotune_(params.dataset->legacy_autotune_),
buffer_size_(std::make_shared<model::SharedState>(
legacy_autotune_ ? 0 : params.dataset->buffer_size_, mu_,
cond_var_)) {
slack_us_ = 0;
}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
auto_tuner_ = std::make_unique<PrefetchAutotuner>(
dataset()->buffer_size_, dataset()->buffer_size_min_,
ctx->ram_budget_manager());
interleave_depth_ = ctx->interleave_depth();
if (buffer_size_->value == model::kAutotune) {
buffer_size_->value = buffer_size_min_;
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(), [this]() { CancelThreads(); },
&deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
if (ctx->warm_start() && !ctx->is_restoring()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
ctx->MergeCheckpoint(iter_ctx.checkpoint());
return absl::OkStatus();
}
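// Consumer side: waits while the buffer is empty, the prefetch thread is
// still running, and the buffer limit is non-zero; serves buffered elements
// via Consume(), and falls back to reading the input directly when the
// buffer limit is 0.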
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const auto& stats_aggregator = ctx->stats_aggregator();
{
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
while (buffer_.empty() && !prefetch_thread_finished_ &&
buffer_limit() != 0) {
if (legacy_autotune_) {
auto_tuner_->RecordEmpty();
buffer_size_->value = auto_tuner_->buffer_limit();
}
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (!buffer_.empty()) {
return Consume(ctx, out_tensors, end_of_sequence);
}
if (prefetch_thread_finished_) {
*end_of_sequence = true;
return absl::OkStatus();
}
DCHECK_EQ(buffer_limit(), 0);
}
mutex_lock input_l(input_mu_);
{
mutex_lock l(*mu_);
if (stats_aggregator) {
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit()), num_elements());
}
}
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
double buffer_size_min = buffer_size_min_;
double buffer_size_max = std::numeric_limits<int64_t>::max();
if (buffer_size_->value != model::kAutotune && buffer_size_->value != 0) {
buffer_size_min = buffer_size_->value;
buffer_size_max = buffer_size_->value;
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1,
{model::MakeParameter(kBufferSize, buffer_size_, buffer_size_min,
buffer_size_max)},
legacy_autotune_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBufferSize, buffer_.size()));
for (size_t i = 0; i < buffer_.size(); i++) {
auto& buffer_element = buffer_[i];
TF_RETURN_IF_ERROR(WriteStatus(writer, i, buffer_element.status));
if (buffer_element.status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), buffer_element.value.size()));
for (size_t j = 0; j < buffer_element.value.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"), buffer_element.value[j]));
}
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
DCHECK(!prefetch_thread_);
DCHECK(buffer_.empty());
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
if (!ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(RestoreBuffer(ctx, reader));
}
if (ctx->warm_start()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
cond_var_->notify_all();
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
int64_t limit = -1, size = -1;
data::TraceMeMetadata result;
if (mu_->try_lock()) {
limit = buffer_limit();
size = buffer_.size();
if (!buffer_.empty()) {
std::vector<std::string> shapes(buffer_.front().value.size());
for (const auto& component : buffer_.front().value) {
shapes.push_back(component.shape().DebugString());
}
result.push_back(std::make_pair("next_element_shapes",
absl::StrJoin(shapes, ",")));
}
mu_->unlock();
}
result.push_back(std::make_pair(
"buffer_limit",
limit == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(limit))));
result.push_back(std::make_pair(
"autotune",
dataset()->buffer_size_ == model::kAutotune ? "true" : "false"));
result.push_back(std::make_pair(
"autotune_mode", legacy_autotune_ ? "legacy" : "performance"));
if (dataset()->slack_period_ > 0) {
result.push_back(std::make_pair(
"slack",
strings::Printf("%lld", static_cast<long long>(slack_us_.load()))));
}
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct BufferElement {
explicit BufferElement(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Status status;
std::vector<Tensor> value;
int64_t created_us;
const uint64 uid;
MemoryCheckpoint checkpoint;
};
Status RestoreBuffer(IteratorContext* const ctx,
IteratorStateReader* const reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
size_t buffer_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBufferSize, &temp));
buffer_size = static_cast<size_t>(temp);
}
for (size_t i = 0; i < buffer_size; i++) {
buffer_.emplace_back(ctx);
auto& buffer_element = buffer_.back();
TF_RETURN_IF_ERROR(ReadStatus(reader, i, &buffer_element.status));
if (buffer_element.status.ok()) {
size_t value_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), &temp));
value_size = static_cast<size_t>(temp);
}
buffer_element.value.reserve(value_size);
for (size_t j = 0; j < value_size; j++) {
buffer_element.value.emplace_back();
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"),
&buffer_element.value.back()));
}
}
RecordBufferEnqueue(ctx, buffer_element.value);
}
return absl::OkStatus();
}
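// The effective buffer limit comes from the legacy PrefetchAutotuner when
// legacy_autotune_ is set, and from the shared tunable buffer_size_ parameter
// otherwise.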
int64_t buffer_limit() const TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (legacy_autotune_) {
return auto_tuner_->buffer_limit();
}
return buffer_size_->value;
}
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
}
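// Pops the front buffer element: publishes its tensors, merges its checkpoint
// into the caller's context, records slack and stats, updates the autotuner,
// and wakes the prefetch thread so it can refill the freed slot.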
Status Consume(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
double buffer_limit_ = buffer_limit();
stats_aggregator->AddToHistogram(
stats_utils::BufferUtilizationHistogramName(dataset()->node_name()),
{static_cast<float>(buffer_.size()) /
static_cast<float>(buffer_limit_)},
num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit_), num_elements());
}
Status s = buffer_.front().status;
if (s.ok()) {
int64_t buffer_element_id = buffer_.front().uid;
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchConsume", {{"element_id", buffer_element_id}});
},
profiler::kInfo);
if (dataset()->slack_period_ > 0 &&
(num_elements() + 1) % dataset()->slack_period_ == 0) {
int64_t slack_us = EnvTime::NowMicros() - buffer_.front().created_us;
slack_us_ = kSleepFactor * slack_us_ + slack_us;
VLOG(2) << "Setting slack_us_: " << slack_us_;
}
*out_tensors = std::move(buffer_.front().value);
ctx->MergeCheckpoint(&buffer_.front().checkpoint);
RecordBufferDequeue(ctx, *out_tensors);
if (legacy_autotune_ && !auto_tuner_->HasElementSize()) {
auto_tuner_->SetElementSize(GetAllocatedBytes(*out_tensors));
}
} else {
RecordBufferDequeue(ctx, buffer_.front().value);
}
if (legacy_autotune_) {
auto_tuner_->RecordConsumption(buffer_.size());
buffer_size_->value = auto_tuner_->buffer_limit();
}
buffer_.pop_front();
*end_of_sequence = false;
cond_var_->notify_all();
return s;
}
Status EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!prefetch_thread_) {
std::shared_ptr<IteratorContext> new_ctx =
std::make_shared<IteratorContext>(*ctx);
prefetch_thread_ = ctx->StartThread(
"tf_data_prefetch", [this, new_ctx]() { PrefetchThread(new_ctx); });
}
return absl::OkStatus();
}
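// Background producer: blocks while the buffer is at its limit, exits on
// cancellation or end of input, optionally sleeps based on the measured
// slack, and appends each fetched element (with its checkpoint) to the
// buffer.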
void PrefetchThread(const std::shared_ptr<IteratorContext>& ctx) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
int num_produced = 0;
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && buffer_.size() >= buffer_limit()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
}
if (dataset()->slack_period_ > 0 &&
num_produced % dataset()->slack_period_ == 0) {
VLOG(2) << "Sleeping for: " << slack_us_ * kSleepFactor;
ctx->env()->SleepForMicroseconds(slack_us_ * kSleepFactor);
}
mutex_lock input_l(input_mu_);
bool end_of_sequence = false;
BufferElement buffer_element(ctx.get());
{
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchProduce", {{"element_id", buffer_element.uid}});
},
profiler::kInfo);
buffer_element.status = input_impl_->GetNext(
ctx.get(), &buffer_element.value, &end_of_sequence);
buffer_element.checkpoint.Merge(ctx->checkpoint());
}
if (buffer_element.status.ok() && end_of_sequence) {
mutex_lock l(*mu_);
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
{
mutex_lock l(*mu_);
RecordBufferEnqueue(ctx.get(), buffer_element.value);
buffer_element.created_us = EnvTime::NowMicros();
buffer_.push_back(std::move(buffer_element));
cond_var_->notify_all();
}
++num_produced;
}
}
Status WriteStatus(IteratorStateWriter* writer, size_t index,
const Status& status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", index), CodeKey(),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", index), ErrorMessageKey(),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatus(IteratorStateReader* reader, size_t index, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(absl::StrCat(prefix(), "::", index),
CodeKey(), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", index),
ErrorMessageKey(), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
string CodeKey() { return absl::StrCat(kStatus, kCodeSuffix); }
string ErrorMessageKey() {
return absl::StrCat(kStatus, kErrorMessageSuffix);
}
const std::shared_ptr<mutex> mu_;
mutex input_mu_ TF_ACQUIRED_BEFORE(*mu_);
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(input_mu_);
const std::shared_ptr<condition_variable> cond_var_;
const int64_t buffer_size_min_;
std::unique_ptr<PrefetchAutotuner> auto_tuner_ TF_GUARDED_BY(*mu_);
std::deque<BufferElement> buffer_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
bool prefetch_thread_finished_ TF_GUARDED_BY(*mu_) = false;
const bool legacy_autotune_;
std::atomic<int64_t> slack_us_;
const std::shared_ptr<model::SharedState> buffer_size_;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> prefetch_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const int64_t slack_period_;
const bool legacy_autotune_ = true;
const int64_t buffer_size_min_ = 0;
absl::Status random_indexing_compatible_;
TraceMeMetadata traceme_metadata_;
};
PrefetchDatasetOp::PrefetchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kSlackPeriod)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSlackPeriod, &slack_period_));
}
if (ctx->HasAttr(kLegacyAutotune)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kLegacyAutotune, &legacy_autotune_));
}
if (ctx->HasAttr(kBufferSizeMin)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kBufferSizeMin, &buffer_size_min_));
}
if (GetExperiments().contains("autotune_buffer_optimization")) {
legacy_autotune_ = false;
buffer_size_min_ = std::max(static_cast<int64_t>(1), buffer_size_min_);
}
}
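// MakeDataset accepts any non-negative buffer size; passing model::kAutotune
// enables buffer-size autotuning and is recorded in the autotune metric (the
// tests below pass -1 for this case).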
void PrefetchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx, buffer_size >= 0 || buffer_size == model::kAutotune,
errors::InvalidArgument("buffer_size must be >= 0 or set "
"buffer_size to be ",
model::kAutotune, " for auto-tuning"));
if (buffer_size == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, buffer_size, slack_period_,
legacy_autotune_, buffer_size_min_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset").Device(DEVICE_CPU).Priority(2),
PrefetchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
.Device(DEVICE_GPU)
.HostMemory("buffer_size")
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
PrefetchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "prefetch_dataset";
class PrefetchDatasetOpTest : public DatasetOpsTestBase {};
class PrefetchDatasetParams : public DatasetParams {
public:
template <typename T>
PrefetchDatasetParams(T input_dataset_params, int64_t buffer_size,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int64_t slack_period, bool legacy_autotune,
int64_t buffer_size_min, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {buffer_size_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(PrefetchDatasetOp::kInputDataset);
input_names->emplace_back(PrefetchDatasetOp::kBufferSize);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("slack_period", slack_period_);
attr_vector->emplace_back("legacy_autotune", legacy_autotune_);
attr_vector->emplace_back("buffer_size_min", buffer_size_min_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return PrefetchDatasetOp::kDatasetType;
}
private:
int64_t buffer_size_;
int64_t slack_period_;
bool legacy_autotune_;
int64_t buffer_size_min_;
};
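// The factories below vary one knob at a time: explicit buffer sizes (5, 0),
// an autotuned buffer size (-1) with and without a slack period, performance
// autotune (legacy_autotune = false), a non-zero buffer_size_min, and an
// invalid buffer size (-2) for the error-path test.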
PrefetchDatasetParams PrefetchDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
5,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
0,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
false,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
3,
kNodeName);
}
PrefetchDatasetParams InvalidBufferSizePrefetchDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-2,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
std::vector<GetNextTestCase<PrefetchDatasetParams>> GetNextTestCases() {
return {
{PrefetchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams5(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams6(),
CreateTensors<int64_t>(
TensorShape{1},
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
GetNextTestCases())
TEST_F(PrefetchDatasetOpTest, DatasetNodeName) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(PrefetchDatasetOpTest, DatasetTypeString) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(PrefetchDatasetOp::kDatasetType)));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<PrefetchDatasetParams>> CardinalityTestCases() {
return {{PrefetchDatasetParams1(),
10},
{PrefetchDatasetParams2(),
10},
{PrefetchDatasetParams3(),
10},
{PrefetchDatasetParams4(),
10},
{PrefetchDatasetParams5(),
10}};
}
DATASET_CARDINALITY_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
CardinalityTestCases())
TEST_F(PrefetchDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
PrefetchDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<PrefetchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PrefetchDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {PrefetchDatasetParams3(),
       {0, 4, 11},
       CreateTensors<int64_t>(
           TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {PrefetchDatasetParams4(),
       {0, 4, 11},
       CreateTensors<int64_t>(
           TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {PrefetchDatasetParams5(),
       {0, 4, 11},
       CreateTensors<int64_t>(
           TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(PrefetchDatasetOpTest, InvalidBufferSize) {
auto dataset_params = InvalidBufferSizePrefetchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(), error::INVALID_ARGUMENT);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5968201d-7ca3-4c7c-856a-00f65586c49f | cpp | tensorflow/tensorflow | shard_dataset_op | tensorflow/core/kernels/data/shard_dataset_op.cc | tensorflow/core/kernels/data/shard_dataset_op_test.cc | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShardDatasetOp::kDatasetType;
constexpr const char* const ShardDatasetOp::kInputDataset;
constexpr const char* const ShardDatasetOp::kNumShards;
constexpr const char* const ShardDatasetOp::kIndex;
constexpr const char* const ShardDatasetOp::kRequireNonEmpty;
constexpr const char* const ShardDatasetOp::kOutputTypes;
constexpr const char* const ShardDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kNextIndex[] = "next_index";
constexpr char kFileShardErrorMessage[] =
"If you are using datasets with distribution strategy, consider setting "
"the auto sharding policy to either DATA or OFF using the "
"`experimental_distribute.auto_shard_policy` option of `tf.data.Options()`."
" Or, split your input files into a larger number of small files such that "
"number of files is greater than number of shards/workers.";
class ShardDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t num_shards, int64_t index,
bool require_non_empty, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)),
num_shards_(num_shards),
index_(index),
input_(input),
require_non_empty_(require_non_empty),
traceme_metadata_(
{{"index", strings::Printf("%lld", static_cast<long long>(index))},
{"num_shards",
strings::Printf("%lld", static_cast<long long>(num_shards))}}) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(num_shards_, index_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / num_shards_ + (index_ < n % num_shards_ ? 1 : 0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
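  // Random access: element `index` of this shard is element
  // `index_ + num_shards_ * index` of the input dataset.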
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index_ + (num_shards_ * index), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* num_shards = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(num_shards_, &num_shards));
Node* index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(index_, &index));
AttrValue require_non_empty_attr;
b->BuildAttrValue(require_non_empty_, &require_non_empty_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, num_shards, index},
{{kRequireNonEmpty, require_non_empty_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), next_index_(0), element_count_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (dataset()->num_shards_ == kShardHint) {
return errors::FailedPrecondition(
"`tf.data.Dataset.shard(SHARD_HINT, ...)` can only be used in "
"`tf.distribute.Strategy.experimental_distribute_dataset()` with "
"`tf.data.experimental.AutoShardPolicy.HINT` policy, or tf.data "
"service with "
"`tf.data.experimental.service.ShardingPolicy.HINT` processing "
"mode.");
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
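      // Skip forward in the input so that the next element read belongs to
      // this shard.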
int num_to_skip =
(dataset()->index_ - next_index_) % dataset()->num_shards_;
if (num_to_skip < 0) {
num_to_skip += dataset()->num_shards_;
}
int num_skipped;
TF_RETURN_IF_ERROR(
input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
next_index_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
next_index_++;
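      // With `require_non_empty`, verify on the first pass that the input
      // holds at least `num_shards_` elements so that FILE-based sharding
      // leaves no worker without data.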
if (dataset()->require_non_empty_ &&
next_index_ < dataset()->num_shards_) {
int num_skipped;
Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
end_of_sequence, &num_skipped);
if (*end_of_sequence || errors::IsOutOfRange(s)) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: the dataset only has ",
next_index_, " file(s), which is not enough for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
} else if (!s.ok()) {
return s;
}
next_index_ = dataset()->num_shards_;
}
*out_tensors = std::move(result);
return absl::OkStatus();
}
Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
auto merge_checkpoint = gtl::MakeCleanup([&ctx_with_index_mapper] {
ctx_with_index_mapper.MergeCheckpoint();
});
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
if (*end_of_sequence && dataset()->require_non_empty_ &&
element_count_ == 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: The dataset does not have "
"enough file(s) for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
}
++element_count_;
return absl::OkStatus();
}
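    // Under global shuffling, shard-local position `p` maps to input position
    // `parent_index_mapper(p) * num_shards + shard_index`.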
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t num_shards = dataset()->num_shards_;
int64_t shard_index = dataset()->index_;
return [parent_index_mapper, num_shards,
shard_index](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t output_index,
parent_index_mapper(element_position));
return output_index * num_shards + shard_index;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(
std::move(args), 1.0 / static_cast<double>(dataset()->num_shards_));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count().has_value()) {
element_count_ = *ctx->restored_element_count();
return RestoreInput(ctx, reader, input_impl_);
}
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kNextIndex, &next_index_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t next_index_ TF_GUARDED_BY(mu_);
size_t element_count_ TF_GUARDED_BY(mu_);
};
const int64_t num_shards_;
const int64_t index_;
const DatasetBase* const input_;
const bool require_non_empty_;
const TraceMeMetadata traceme_metadata_;
absl::Status random_indexing_compatible_;
};
ShardDatasetOp::ShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRequireNonEmpty, &require_non_empty_));
}
void ShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index = 0;
int64_t num_shards = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kNumShards, &num_shards));
OP_REQUIRES(
ctx, num_shards > 0 || num_shards == kShardHint,
errors::InvalidArgument("Number of shards must be greater than zero "
"(currently num_shards = ",
num_shards, ")."));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kIndex, &index));
OP_REQUIRES(
ctx, (index >= 0 && index < num_shards) || num_shards == kShardHint,
errors::InvalidArgument("Index must be between 0 and ", num_shards - 1,
" (currently index = ", index, ")."));
*output = new Dataset(ctx, num_shards, index, require_non_empty_, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ShardDataset").Device(DEVICE_CPU),
ShardDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "shard_dataset";
class ShardDatasetParams : public DatasetParams {
public:
template <typename T>
ShardDatasetParams(T input_dataset_params, int64_t num_shards, int64_t index,
bool require_non_empty, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_shards_(num_shards),
index_(index),
require_non_empty_(require_non_empty) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_shards_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShardDatasetOp::kInputDataset);
input_names->emplace_back(ShardDatasetOp::kNumShards);
input_names->emplace_back(ShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("require_non_empty", require_non_empty_);
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ShardDatasetOp::kDatasetType; }
private:
int64_t num_shards_;
int64_t index_;
bool require_non_empty_;
};
class ShardDatasetOpTest : public DatasetOpsTestBase {};
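// The factory functions below pass ShardDatasetParams constructor arguments in
// order: input dataset, num_shards, index, require_non_empty, output dtypes,
// output shapes, node name.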
ShardDatasetParams ShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
0,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
7,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams5() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
4,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams6() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams7() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParamsWithNoElemForEachShard() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
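// Shard `i` of `n` over range(0, 10) is expected to yield i, i + n, i + 2n, ...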
std::vector<GetNextTestCase<ShardDatasetParams>> GetNextTestCases() {
return {
{ShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{}},
{ShardDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
GetNextTestCases())
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ShardDatasetOpTest, DatasetTypeString) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ShardDatasetOp::kDatasetType)));
}
TEST_F(ShardDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<ShardDatasetParams>> CardinalityTestCases() {
return {{ShardDatasetParams1(),
2},
{ShardDatasetParams2(),
2},
{ShardDatasetParams3(),
0},
{ShardDatasetParams4(),
1},
{ShardDatasetParams5(),
2},
{ShardDatasetParams6(),
2},
{ShardDatasetParams7(),
1}};
}
DATASET_CARDINALITY_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
CardinalityTestCases())
TEST_F(ShardDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ShardDatasetOpTest, IteratorPrefix) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShardDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ShardDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{ShardDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{0, 1},
{}},
{ShardDatasetParams4(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ShardDatasetOpTest, NoElemForEachShard) {
auto dataset_params = InvalidShardDatasetParamsWithNoElemForEachShard();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ShardDatasetOpTest, InvalidArguments) {
std::vector<ShardDatasetParams> invalid_dataset_params = {
InvalidShardDatasetParams1(), InvalidShardDatasetParams2(),
InvalidShardDatasetParams3(), InvalidShardDatasetParams4()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shard_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shard_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
63728d0f-b84d-4e18-bb17-5fb065cf6c33 | cpp | tensorflow/tensorflow | concatenate_dataset_op | tensorflow/core/kernels/data/concatenate_dataset_op.cc | tensorflow/core/kernels/data/concatenate_dataset_op_test.cc | #include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include <algorithm>
#include <cstddef>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ConcatenateDatasetOp::kDatasetType;
constexpr const char* const ConcatenateDatasetOp::kInputDataset;
constexpr const char* const ConcatenateDatasetOp::kAnotherDataset;
constexpr const char* const ConcatenateDatasetOp::kOutputTypes;
constexpr const char* const ConcatenateDatasetOp::kOutputShapes;
constexpr char kIndex[] = "i";
constexpr char kInputImplUninitialized[] = "input_impl_uninitialized";
constexpr char kElementCount[] = "element_count";
namespace {
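// Repeatedly queries the index mapper, skipping positions reported as
// NotFound (they belong to the other concatenated input), until it yields a
// usable index or signals OutOfRange.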
absl::StatusOr<size_t> GetNextShuffledIndex(const IndexMapperFn& index_mapper,
size_t& element_count) {
absl::StatusOr<size_t> shuffled_index = absl::NotFoundError("default");
while (absl::IsNotFound(shuffled_index.status())) {
shuffled_index = index_mapper(element_count++);
if (absl::IsOutOfRange(shuffled_index.status())) {
return shuffled_index.status();
}
if (!absl::IsNotFound(shuffled_index.status()) && !shuffled_index.ok()) {
return shuffled_index.status();
}
}
return shuffled_index;
}
}
class ConcatenateDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, const DatasetBase* input,
const DatasetBase* to_concatenate)
: DatasetBase(DatasetContext(ctx)),
input_(input),
to_concatenate_(to_concatenate),
input_cardinality_(input->Cardinality()),
to_concatenate_cardinality_(to_concatenate_->Cardinality()) {
input_->Ref();
to_concatenate_->Ref();
auto os_input = input->output_shapes();
auto os_concatenate = to_concatenate->output_shapes();
for (int i = 0; i < os_input.size(); i++) {
PartialTensorShape output_tensorshape({});
OP_REQUIRES_OK(ctx,
MostSpecificCompatibleShape(os_input[i], os_concatenate[i],
&output_tensorshape));
output_shapes_.push_back(output_tensorshape);
}
if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input->RandomIndexingCompatible();
} else if (to_concatenate_ != nullptr &&
!to_concatenate_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = to_concatenate_->RandomIndexingCompatible();
}
}
~Dataset() override {
input_->Unref();
to_concatenate_->Unref();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t input_cardinality = input_->Cardinality(options);
int64_t to_concatenate_cardinality = to_concatenate_->Cardinality(options);
if (input_cardinality == kInfiniteCardinality ||
to_concatenate_cardinality == kInfiniteCardinality) {
return kInfiniteCardinality;
}
if (input_cardinality == kUnknownCardinality ||
to_concatenate_cardinality == kUnknownCardinality) {
return kUnknownCardinality;
}
return input_cardinality + to_concatenate_cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
inputs->push_back(to_concatenate_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(input_->CheckExternalState());
return to_concatenate_->CheckExternalState();
}
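  // Random access: indices below the first input's cardinality are served by
  // it; the remainder are served by `to_concatenate_`, shifted accordingly.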
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
if (index < input_cardinality_) {
TF_RETURN_IF_ERROR(input_->Get(ctx, index, out_tensors));
} else {
TF_RETURN_IF_ERROR(
to_concatenate_->Get(ctx, index - input_cardinality_, out_tensors));
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
Node* to_concatenate_graph = nullptr;
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, to_concatenate_, &to_concatenate_graph));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph, to_concatenate_graph}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_impls_.resize(2);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&input_contexts_[0], this, strings::StrCat(prefix(), "[0]"),
&input_impls_[0]));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impls_[0] && !input_impls_[1]) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (ctx->index_mapper()) {
if (input_impls_[1] == nullptr) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
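        // Give each input its own index mapper: a global shuffled position
        // resolves to a local index in that input, is NotFound when it
        // belongs to the other input, and is OutOfRange once both inputs are
        // exhausted.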
if (input_contexts_[0].index_mapper() == nullptr) {
IndexMapperFn left_index_mapper =
[index_mapper = ctx->index_mapper(),
left_cardinality = dataset()->input_cardinality_,
right_cardinality = dataset()->to_concatenate_cardinality_](
size_t to_idx) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t from_idx, index_mapper(to_idx));
if (from_idx >= left_cardinality + right_cardinality) {
return absl::OutOfRangeError("Running out of elements.");
}
if (from_idx >= left_cardinality) {
return absl::NotFoundError("Skipping this element.");
}
return from_idx;
};
IndexMapperFn right_index_mapper =
[index_mapper = ctx->index_mapper(),
left_cardinality = dataset()->input_cardinality_,
right_cardinality = dataset()->to_concatenate_cardinality_](
size_t to_idx) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t from_idx, index_mapper(to_idx));
if (from_idx >= left_cardinality + right_cardinality) {
return absl::OutOfRangeError("Running out of elements.");
}
if (from_idx < left_cardinality) {
return absl::NotFoundError("Skipping this element.");
}
return from_idx - left_cardinality;
};
input_contexts_[0].SetIndexMapper(left_index_mapper);
input_contexts_[1].SetIndexMapper(right_index_mapper);
}
absl::StatusOr<size_t> shuffled_index =
GetNextShuffledIndex(ctx->index_mapper(), element_count_);
if (absl::IsOutOfRange(shuffled_index.status())) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(shuffled_index.status());
bool temp_end_of_sequence = false;
absl::Status status = absl::OkStatus();
if (shuffled_index.value() < dataset()->input_cardinality_) {
status = input_impls_[0]->GetNext(&input_contexts_[0], out_tensors,
&temp_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
} else {
status = input_impls_[1]->GetNext(&input_contexts_[1], out_tensors,
&temp_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
TF_RETURN_IF_ERROR(status);
if (temp_end_of_sequence) {
*end_of_sequence = temp_end_of_sequence;
return absl::OkStatus();
}
return absl::OkStatus();
}
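      // Sequential (non-shuffled) path: drain the first input, then switch to
      // the second.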
for (; i_ < 2; ++i_) {
TF_RETURN_IF_ERROR(input_impls_[i_]->GetNext(
&input_contexts_[i_], out_tensors, end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[i_].checkpoint());
if (!*end_of_sequence) {
return absl::OkStatus();
}
if (i_ == 0) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
}
*end_of_sequence = true;
input_impls_[0].reset();
input_impls_[1].reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kIndex, i_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kElementCount, element_count_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 0),
static_cast<int64_t>(!input_impls_[0])));
if (input_impls_[0]) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impls_[0]));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 1),
static_cast<int64_t>(!input_impls_[1])));
if (input_impls_[1]) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impls_[1]));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_uninitialized[2];
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 0),
&input_uninitialized[0]));
if (static_cast<bool>(input_uninitialized[0])) {
input_impls_[0].reset();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 1),
&input_uninitialized[1]));
if (static_cast<bool>(input_uninitialized[1])) {
input_impls_[1].reset();
}
if (ctx->restored_element_count()) {
if (input_impls_.size() != 2) {
return absl::FailedPreconditionError(
"`Initialize` should be called before restoring from the "
"checkpoint.");
}
{
int64_t tmp_element_count;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kElementCount, &tmp_element_count));
if (tmp_element_count < 0) {
return absl::FailedPreconditionError(absl::StrFormat(
"element_count should be >= 0. Got %d", tmp_element_count));
}
element_count_ = static_cast<size_t>(tmp_element_count);
}
if (!static_cast<bool>(input_uninitialized[0])) {
if (!input_impls_[0]) {
return absl::FailedPreconditionError(
"Something went wrong internally. The first iterator should "
"exist because of `Initialize`.");
}
input_contexts_[0].set_restored_element_count(
*ctx->restored_element_count());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[0], reader, input_impls_[0]));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
}
if (!static_cast<bool>(input_uninitialized[1])) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
input_contexts_[1].set_restored_element_count(
*ctx->restored_element_count());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[1], reader, input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kIndex, &i_));
if (!TF_PREDICT_TRUE(i_ >= 0 && i_ <= 2))
return errors::InvalidArgument("i_ must be in range [0, 2].");
if (!static_cast<bool>(input_uninitialized[0])) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impls_[0]));
}
if (!static_cast<bool>(input_uninitialized[1])) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[1], reader, input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<IteratorBase>> input_impls_ TF_GUARDED_BY(mu_);
std::vector<IteratorContext> input_contexts_ TF_GUARDED_BY(mu_);
size_t element_count_ TF_GUARDED_BY(mu_) = 0;
};
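  // When the ranks match, keeps dimensions the two inputs agree on and marks
  // differing dimensions as unknown (-1).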
Status MostSpecificCompatibleShape(const PartialTensorShape& ts1,
const PartialTensorShape& ts2,
PartialTensorShape* output_tensorshape) {
if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
return absl::OkStatus();
auto dims1 = ts1.dim_sizes();
auto dims2 = ts2.dim_sizes();
for (int d = 0; d < ts1.dims(); d++) {
if (dims1[d] == dims2[d])
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(dims1[d]));
else
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(-1));
}
return absl::OkStatus();
}
const DatasetBase* input_;
const DatasetBase* to_concatenate_;
const int64_t input_cardinality_;
const int64_t to_concatenate_cardinality_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
ConcatenateDatasetOp::ConcatenateDatasetOp(OpKernelConstruction* ctx)
: BinaryDatasetOpKernel(ctx) {}
void ConcatenateDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase* to_concatenate,
DatasetBase** output) {
  OP_REQUIRES(ctx, input->output_dtypes() == to_concatenate->output_dtypes(),
              errors::InvalidArgument(
                  "input dataset and dataset to concatenate"
                  " have different output_types ",
                  DataTypeVectorString(input->output_dtypes()), " and ",
                  DataTypeVectorString(to_concatenate->output_dtypes())));
*output = new Dataset(ctx, input, to_concatenate);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ConcatenateDataset").Device(DEVICE_CPU),
ConcatenateDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "concatenate_dataset";
ConcatenateDatasetParams SameShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2},
{{1, 2, 3, 4}, {5, 6, 7, 8}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(
TensorShape{2, 2}, {{11, 12, 13, 14}, {15, 16, 17, 18}}),
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({2}), PartialTensorShape({2})},
kNodeName);
}
ConcatenateDatasetParams DifferentShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 3}, {1, 2, 3, 4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2, 2}, {7, 8, 9, 10})},
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {11, 12, 13, 14}),
CreateTensor<int64_t>(TensorShape{2, 1}, {15, 16})},
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({-1}), PartialTensorShape({-1})},
kNodeName);
}
ConcatenateDatasetParams DifferentDtypeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2}, {{1, 2, 3, 4}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<double>(TensorShape{2, 2}, {{1.0, 2.0, 3.0, 4.0}}),
"tensor_slice_1");
return ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
class ConcatenateDatasetOpTest : public DatasetOpsTestBase {};
std::vector<GetNextTestCase<ConcatenateDatasetParams>> GetNextTestCases() {
return {{SameShapeConcatenateDatasetParams(),
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_GET_NEXT_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
GetNextTestCases())
TEST_F(ConcatenateDatasetOpTest, DifferentDtypes) {
auto dataset_params = DifferentDtypeConcatenateDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ConcatenateDatasetOpTest, DatasetNodeName) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ConcatenateDatasetOpTest, DatasetTypeString) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ConcatenateDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ConcatenateDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<ConcatenateDatasetParams>>
DatasetOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
          {DifferentShapeConcatenateDatasetParams(),
           DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ConcatenateDatasetParams>>
CardinalityTestCases() {
return {{SameShapeConcatenateDatasetParams(),
4},
{DifferentShapeConcatenateDatasetParams(),
4}};
}
DATASET_CARDINALITY_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ConcatenateDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<ConcatenateDatasetParams>>
IteratorOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ConcatenateDatasetOpTest, IteratorPrefix) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ConcatenateDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ConcatenateDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{SameShapeConcatenateDatasetParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/concatenate_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/concatenate_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8af32f24-845c-498c-bf76-f9bf36338851 | cpp | tensorflow/tensorflow | batch_dataset_op | tensorflow/core/kernels/data/batch_dataset_op.cc | tensorflow/core/kernels/data/batch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const BatchDatasetOp::kDatasetType;
constexpr const char* const BatchDatasetOp::kInputDataset;
constexpr const char* const BatchDatasetOp::kBatchSize;
constexpr const char* const BatchDatasetOp::kDropRemainder;
constexpr const char* const BatchDatasetOp::kParallelCopy;
constexpr const char* const BatchDatasetOp::kOutputTypes;
constexpr const char* const BatchDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kBatchDataset[] = "BatchDataset";
class BatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder,
bool parallel_copy, const DatasetBase* input, int op_version)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
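        // Cap the per-batch vector reservation (at 2^16 elements) when the
        // final batch may be smaller than `batch_size`.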
reserve_size_(drop_remainder ? batch_size
: std::min<int64_t>(batch_size, 1 << 16)),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
input_(input),
op_version_(op_version),
traceme_metadata_(
{{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
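    // The leading (batch) dimension is static only when every emitted batch
    // is guaranteed to be full; otherwise it is unknown (-1).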
for (const auto& input_shape : input_shapes) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.emplace_back(
PartialTensorShape({batch_size_}).Concatenate(input_shape));
} else {
output_shapes_.emplace_back(
PartialTensorShape({-1}).Concatenate(input_shape));
}
}
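    // Global shuffling is only supported when the remainder is dropped, so
    // that every batch has the same static size.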
random_indexing_compatible_ = absl::OkStatus();
if (!drop_remainder_) {
random_indexing_compatible_ = absl::FailedPreconditionError(absl::StrCat(
type_string(),
" does not support global shuffling with `drop_remainder=False`."));
} else if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
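  // floor(n / batch_size) full batches, plus one trailing partial batch
  // unless the remainder is dropped.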
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
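  // Random access: materializes batch `index` by fetching its elements from
  // the input and copying them into batched tensors.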
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
const int64 cardinality = Cardinality();
if (index < 0 || index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
int batch_start_index = batch_size_ * index;
std::vector<std::vector<Tensor>> batch_elements;
int input_cardinality = input_->Cardinality();
for (int i = batch_start_index;
i < batch_start_index + batch_size_ && i < input_cardinality; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_->Get(ctx, i, &batch_element_tuple));
batch_elements.emplace_back(std::move(batch_element_tuple));
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
parallel_copy_, out_tensors));
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
AttrValue parallel_copy;
b->BuildAttrValue(parallel_copy_, ¶llel_copy);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, batch_size, drop_remainder},
{{kParallelCopy, parallel_copy}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
tsl::mutex_lock l(mu_);
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
batch_elements.reserve(dataset()->reserve_size_);
*end_of_sequence = false;
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
&batch_element_tuple,
end_of_sequence));
if (!*end_of_sequence) {
batch_elements.emplace_back(std::move(batch_element_tuple));
} else {
input_impl_.reset();
}
}
ctx_with_index_mapper.MergeCheckpoint();
}
if (batch_elements.empty()) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
if (dataset()->drop_remainder_ &&
batch_elements.size() < dataset()->batch_size_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
dataset()->parallel_copy_, out_tensors));
*end_of_sequence = false;
return absl::OkStatus();
}
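    // Under global shuffling, batches are shuffled as units: the parent
    // mapper permutes the batch position while each element keeps its offset
    // within its batch.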
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t batch_size = dataset()->batch_size_;
return [parent_index_mapper,
batch_size](size_t element_position) -> absl::StatusOr<size_t> {
size_t batch_element_position = element_position / batch_size;
size_t input_element_offset = element_position % batch_size;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(batch_element_position));
return shuffled_element_position * batch_size + input_element_offset;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
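      // When restoring under global shuffling, convert the restored count of
      // batches back into a count of input elements for the child iterator.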
if (ctx->restored_element_count().has_value()) {
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() * dataset()->batch_size_;
IteratorContext ctx_copy(params);
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(&ctx_copy, reader, input_impl_));
ctx->MergeCheckpoint(ctx_copy.checkpoint());
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t batch_size_;
const int64_t reserve_size_;
const bool drop_remainder_;
const bool parallel_copy_;
const DatasetBase* const input_;
const int op_version_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_;
const TraceMeMetadata traceme_metadata_;
};
BatchDatasetOp::BatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == kBatchDataset ? 1 : 2) {
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void BatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
bool drop_remainder = false;
if (op_version_ > 1) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
}
*output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_, input,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("BatchDataset").Device(DEVICE_CPU),
BatchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("BatchDatasetV2").Device(DEVICE_CPU),
BatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "batch_dataset";
class BatchDatasetOpTest : public DatasetOpsTestBase {};
BatchDatasetParams BatchDatasetParams1() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
false,
true,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams2() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
true,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams3() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams4() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
true,
true,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams5() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
true,
true,
{DT_INT64},
{PartialTensorShape({12})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams6() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
false,
true,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams7() {
return BatchDatasetParams(RangeDatasetParams(0, 0, 1),
4,
false,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams InvalidBatchSizeBatchDatasetParams() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
-1,
false,
false,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
std::vector<GetNextTestCase<BatchDatasetParams>> GetNextTestCases() {
return {{BatchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({3}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{BatchDatasetParams5(),
{}},
{BatchDatasetParams6(),
CreateTensors<int64_t>(TensorShape({10}),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}})},
{BatchDatasetParams7(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
GetNextTestCases())
TEST_F(BatchDatasetOpTest, DatasetNodeName) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(batch_dataset_params.node_name()));
}
TEST_F(BatchDatasetOpTest, DatasetTypeString) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::OpNameParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(BatchDatasetOp::kDatasetType, params)));
}
TEST_F(BatchDatasetOpTest, DatasetOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<BatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<BatchDatasetParams>> CardinalityTestCases() {
return {
{BatchDatasetParams1(), 3},
{BatchDatasetParams2(), 3},
{BatchDatasetParams3(), 4},
{BatchDatasetParams4(), 3},
{BatchDatasetParams5(), 0},
{BatchDatasetParams6(), 1},
{BatchDatasetParams7(), 0}};
}
DATASET_CARDINALITY_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
CardinalityTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<BatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputPrefix) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
BatchDatasetOp::kDatasetType, batch_dataset_params.iterator_prefix(),
params)));
}
std::vector<IteratorSaveAndRestoreTestCase<BatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{BatchDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8})}},
{BatchDatasetParams5(),
{0, 1, 5},
{}},
{BatchDatasetParams6(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({10}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})}},
{BatchDatasetParams7(),
{0, 1, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(BatchDatasetOpTest, InvalidBatchSize) {
auto batch_dataset_params = InvalidBatchSizeBatchDatasetParams();
EXPECT_EQ(Initialize(batch_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
REGISTER_OP("BatchDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
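// Adds an Identity node for each output of `node` so the test can inspect
// the types that inference assigns to those outputs.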
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
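// Helper that runs the TypeInferencePass for the test graph.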
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(BatchDatsetOpTest, TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_size;
Node* drop_remainder;
Node* batch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(NodeBuilder("batch_size", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_size));
TF_EXPECT_OK(NodeBuilder("drop_remainder", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("BatchDatasetV2", "BatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_size)
.Input(drop_remainder)
.Finalize(&graph, &batch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(batch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/batch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/batch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
703e8d85-24a9-416f-9372-b1a016d13026 | cpp | tensorflow/tensorflow | parallel_filter_dataset_op | tensorflow/core/kernels/data/parallel_filter_dataset_op.cc | tensorflow/core/kernels/data/parallel_filter_dataset_op_test.cc | #include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <deque>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelFilterDatasetOp::kDatasetType;
constexpr const char* const ParallelFilterDatasetOp::kInputDataset;
constexpr const char* const
ParallelFilterDatasetOp::kOtherArguments;
constexpr const char* const
ParallelFilterDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelFilterDatasetOp::kPredicate;
constexpr const char* const
ParallelFilterDatasetOp::kDeterministic;
constexpr const char* const ParallelFilterDatasetOp::kTarguments;
constexpr const char* const ParallelFilterDatasetOp::kOutputTypes;
constexpr const char* const ParallelFilterDatasetOp::kOutputShapes;
constexpr char kComponent[] = "component";
constexpr char kReturnValues[] = "return_values";
constexpr char kPredicateValues[] = "predicate_values";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
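// Dataset that evaluates the captured predicate on elements of the input
// dataset with up to `num_parallel_calls` invocations in flight, producing
// only the elements for which the predicate returns true.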
class ParallelFilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
num_parallel_calls_(num_parallel_calls),
deterministic_(deterministic),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
AttrValue predicate_attr;
b->BuildAttrValue(captured_func_->func(), &predicate_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kDeterministic, deterministic_attr},
{kPredicate, predicate_attr},
{kTarguments, other_arguments_types_attr}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(ctx, &result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelFilterConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncUnknownRatioNode(
std::move(args),
{model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kReturnValues),
result.return_values));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kPredicateValues),
result.predicate_values));
if (result.end_of_input) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput, ""));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, &invocation_results_size));
DCHECK(invocation_results_.empty());
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>());
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kReturnValues),
&result.return_values));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kPredicateValues),
&result.predicate_values));
result.end_of_input = reader->Contains(element_prefix, kEndOfInput);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
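// State of a single asynchronous predicate invocation: the input element
// (`return_values`), the predicate output (`predicate_values`), and a
// notification signalled when the call completes.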
struct InvocationResult {
InvocationResult() : uid(tensorflow::EnvTime::NowNanos()) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
std::vector<Tensor> predicate_values;
bool end_of_input = false;
const int64_t uid;
};
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_filter",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
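// Reads the next input element and invokes the predicate on it
// asynchronously; the predicate must return a single scalar bool.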
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelFilterProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
result->status = input_impl_->GetNext(ctx.get(), &input_element,
&result->end_of_input);
if (result->end_of_input || !result->status.ok()) {
CallCompleted(ctx, result);
return;
}
result->return_values = input_element;
auto done = [this, ctx, result](Status status) {
result->status.Update(status);
if (status.ok() && (result->predicate_values.size() != 1 ||
result->predicate_values[0].dtype() != DT_BOOL ||
result->predicate_values[0].NumElements() != 1)) {
result->status.Update(errors::InvalidArgument(
"Filter predicate `predicate` must return a scalar bool."));
}
RecordBufferEnqueue(ctx.get(), result->return_values);
CallCompleted(ctx, result);
};
if (dataset()->captured_func_->use_inter_op_parallelism()) {
instantiated_captured_func_->RunAsync(
ctx.get(), std::move(input_element), &result->predicate_values,
std::move(done), model_node());
} else {
auto fn = std::bind(
[this, ctx, result](std::vector<Tensor> input_element) {
return instantiated_captured_func_->Run(
ctx.get(), std::move(input_element),
&result->predicate_values, model_node());
},
std::move(input_element));
(*ctx->runner())(
[this, ctx, fn = std::move(fn), done = std::move(done)]() {
Status s;
if (IsRecording(ctx.get())) {
s = fn();
} else {
RecordStart(ctx.get());
s = fn();
RecordStop(ctx.get());
}
done(s);
});
}
}
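// Converts a completed invocation into the iterator's output, mapping
// OutOfRange errors raised by the predicate to InvalidArgument.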
Status ProcessResult(IteratorContext* ctx,
const std::shared_ptr<InvocationResult>& result,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
if (!result->end_of_input && result->status.ok()) {
*out_tensors = std::move(result->return_values);
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(result->status)) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
result->status.message());
}
*end_of_sequence = result->end_of_input;
return result->status;
}
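// Background thread that keeps up to `num_parallel_calls` predicate
// invocations in flight.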
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
std::vector<std::shared_ptr<InvocationResult>> new_calls;
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
invocation_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
invocation_results_.push_back(std::make_shared<InvocationResult>());
new_calls.push_back(invocation_results_.back());
num_calls_++;
}
cond_var_->notify_all();
}
for (const auto& call : new_calls) {
CallFunction(ctx, call);
}
new_calls.clear();
}
}
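// Returns whether GetNext should keep waiting for a result. Completed
// results whose predicate evaluated to false are discarded here; in
// non-deterministic mode any ready result may be returned, otherwise
// results are returned in input order.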
bool ShouldWait(IteratorContext* ctx,
std::shared_ptr<InvocationResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
auto PredicateReady = [](const InvocationResult* result) -> bool {
return result->status.ok() && !result->end_of_input;
};
auto GetPredicateValue = [](const InvocationResult* result) -> bool {
return result->predicate_values[0].scalar<bool>()();
};
while (!invocation_results_.empty() &&
invocation_results_.front()->notification.HasBeenNotified() &&
PredicateReady(invocation_results_.front().get()) &&
!GetPredicateValue(invocation_results_.front().get())) {
RecordBufferDequeue(ctx, invocation_results_.front()->return_values);
invocation_results_.pop_front();
cond_var_->notify_all();
}
if (!deterministic_) {
for (auto it = invocation_results_.begin();
it != invocation_results_.end(); ++it) {
if ((*it)->notification.HasBeenNotified() &&
(it == invocation_results_.begin() ||
(PredicateReady(it->get()) && GetPredicateValue(it->get())))) {
std::swap(*result, *it);
invocation_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else {
if (!invocation_results_.empty() &&
invocation_results_.front()->notification.HasBeenNotified()) {
std::swap(*result, invocation_results_.front());
invocation_results_.pop_front();
if (!(*result)->end_of_input) {
RecordBufferDequeue(ctx, (*result)->return_values);
}
cond_var_->notify_all();
return false;
}
}
return true;
}
Status WriteComponentsLocked(IteratorStateWriter* writer,
const std::string& prefix,
const std::vector<Tensor>& values)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, kSize, values.size()));
for (size_t j = 0; j < values.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix, absl::StrCat(kComponent, "[", j, "]"), values[j]));
}
return absl::OkStatus();
}
Status ReadComponentsLocked(IteratorContext* ctx,
IteratorStateReader* reader,
const std::string& prefix,
std::vector<Tensor>* values)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t size;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix, kSize, &size));
size_t num_return_values = static_cast<size_t>(size);
if (num_return_values != size) {
return errors::InvalidArgument(prefix, ",", kSize, ": ", size,
" is not a valid value of type size_t.");
}
values->reserve(num_return_values);
for (size_t j = 0; j < num_return_values; j++) {
values->emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix, absl::StrCat(kComponent, "[", j, "]"),
&values->back()));
}
return absl::OkStatus();
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const std::string& key, const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
key, kErrorCode, static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(key, kErrorMessage,
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader, const std::string& key,
Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(key, kErrorCode, &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(
reader->ReadScalar(key, kErrorMessage, &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
const bool autotune_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<InvocationResult>> invocation_results_
TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t num_parallel_calls_;
const DeterminismPolicy deterministic_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
ParallelFilterDatasetOp::ParallelFilterDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
&func_metadata_));
OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
errors::InvalidArgument(
"predicate function has more than one return value."));
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(ctx,
DeterminismPolicy::FromString(deterministic, &deterministic_));
}
void ParallelFilterDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t num_parallel_calls;
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
if (num_parallel_calls == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, num_parallel_calls, deterministic_,
std::move(captured_func));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelFilterDataset").Device(DEVICE_CPU),
ParallelFilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ParallelFilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
class ParallelFilterDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelFilterDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int num_parallel_calls, const std::string& deterministic,
FunctionDefHelper::AttrValueWrapper pred_func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
num_parallel_calls_(num_parallel_calls),
deterministic_(deterministic),
pred_func_(std::move(pred_func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size());
input_names->emplace_back(ParallelFilterDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelFilterDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelFilterDatasetOp::kNumParallelCalls);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{"predicate", pred_func_}, {"Targuments", type_arguments_},
{"output_shapes", output_shapes_}, {"output_types", output_dtypes_},
{"deterministic", deterministic_}, {"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelFilterDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int num_parallel_calls_;
std::string deterministic_;
FunctionDefHelper::AttrValueWrapper pred_func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class ParallelFilterDatasetOpTest : public DatasetOpsTestBase {};
ParallelFilterDatasetParams ParallelFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kNondeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
4,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
4,
DeterminismPolicy::kNondeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
model::kAutotune,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams InputHasNoElementParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("GetUnique",
{{"T", DT_INT64}, {"out_idx", DT_INT32}}),
{test::function::Unique()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<ParallelFilterDatasetParams>> GetNextTestCases() {
return {{ParallelFilterDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams2(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams3(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams4(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams5(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams6(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{InputHasNoElementParams(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams, GetNextTestCases())
TEST_F(ParallelFilterDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelFilterDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelFilterDatasetOp::kDatasetType)));
}
TEST_F(ParallelFilterDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<ParallelFilterDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ParallelFilterDatasetParams1(),
{PartialTensorShape({1})}},
{InputHasNoElementParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ParallelFilterDatasetParams>>
CardinalityTestCases() {
return {{ParallelFilterDatasetParams1(),
kUnknownCardinality},
{InputHasNoElementParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams, CardinalityTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<ParallelFilterDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ParallelFilterDatasetParams1(),
{PartialTensorShape({1})}},
{InputHasNoElementParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelFilterDatasetOp::kDatasetType,
dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelFilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelFilterDatasetParams1(),
{0, 2, 6},
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{InputHasNoElementParams(),
{0, 2, 6},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidPredicateFuncTest
: public ParallelFilterDatasetOpTest,
public ::testing::WithParamInterface<ParallelFilterDatasetParams> {};
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
auto dataset_params = GetParam();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
ParallelFilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
InvalidPredFuncFilterDatasetParams2(),
InvalidPredFuncFilterDatasetParams3()}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_filter_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_filter_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4403960-776f-4231-9135-ac98cd07c779 | cpp | tensorflow/tensorflow | tf_record_dataset_op | tensorflow/core/kernels/data/tf_record_dataset_op.cc | tensorflow/core/kernels/data/tf_record_dataset_op_test.cc | #include "tensorflow/core/kernels/data/tf_record_dataset_op.h"
#include <cstdint>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace data {
constexpr const char* const TFRecordDatasetOp::kDatasetType;
constexpr const char* const TFRecordDatasetOp::kFileNames;
constexpr const char* const TFRecordDatasetOp::kCompressionType;
constexpr const char* const TFRecordDatasetOp::kBufferSize;
constexpr const char* const TFRecordDatasetOp::kByteOffsets;
constexpr char kTFRecordDataset[] = "TFRecordDataset";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kOffset[] = "offset";
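// Filesystem prefixes used to select larger default read buffers for
// GCS and S3.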
constexpr char kGcsFsPrefix[] = "gs://";
constexpr char kS3FsPrefix[] = "s3://";
constexpr int64_t kUnspecifiedBufferSize = -1;
constexpr int64_t kDefaultBufferSize = 256LL << 10;
constexpr int64_t kCloudTpuBlockSize = 127LL << 20;
constexpr int64_t kS3BlockSize = kCloudTpuBlockSize;
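// Whether this binary is running against the Cloud TPU GCS filesystem,
// which benefits from a much larger default read buffer.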
bool is_cloud_tpu_gcs_fs() {
#if (defined(PLATFORM_CLOUD_TPU) && defined(TPU_GCS_FS)) || \
defined(LIBTPU_ON_GCE)
return true;
#endif
return false;
}
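// Dataset that reads records sequentially from a list of TFRecord files,
// optionally decompressing them and starting each file at a caller-provided
// byte offset.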
class TFRecordDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<string> filenames,
const string& compression_type, int64_t buffer_size,
std::vector<int64_t> byte_offsets, int op_version)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
options_(io::RecordReaderOptions::CreateRecordReaderOptions(
compression_type)),
byte_offsets_(std::move(byte_offsets)),
op_version_(op_version) {
if (buffer_size > 0) {
options_.buffer_size = buffer_size;
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
Node* compression_type = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(options_.buffer_size, &buffer_size));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {filenames, compression_type, buffer_size}, output));
Node* byte_offsets = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(byte_offsets_, &byte_offsets));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
out_tensors->reserve(1);
mutex_lock l(mu_);
do {
if (reader_) {
out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
TensorShape({}));
Status s =
reader_->ReadRecord(&out_tensors->back().scalar<tstring>()());
if (s.ok()) {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
bytes_counter->IncrementBy(
out_tensors->back().scalar<tstring>()().size());
*end_of_sequence = false;
return absl::OkStatus();
}
out_tensors->pop_back();
if (!errors::IsOutOfRange(s)) {
ResetStreamsLocked();
++current_file_index_;
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
*num_skipped = 0;
mutex_lock l(mu_);
do {
if (reader_) {
int last_num_skipped;
Status s = reader_->SkipRecords(num_to_skip - *num_skipped,
&last_num_skipped);
*num_skipped += last_num_skipped;
if (s.ok()) {
*end_of_sequence = false;
return absl::OkStatus();
}
if (!errors::IsOutOfRange(s)) {
ResetStreamsLocked();
++current_file_index_;
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
if (reader_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kOffset, reader_->TellOffset()));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
ResetStreamsLocked();
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, ¤t_file_index));
current_file_index_ = size_t(current_file_index);
if (reader->Contains(prefix(), kOffset)) {
int64_t offset;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kOffset, &offset));
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
TF_RETURN_IF_ERROR(reader_->SeekOffset(offset));
}
return absl::OkStatus();
}
private:
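// Opens the file at `current_file_index_` and creates a record reader for
// it, seeking to the configured byte offset if one was provided.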
Status SetupStreamsLocked(Env* env) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (current_file_index_ >= dataset()->filenames_.size()) {
return errors::InvalidArgument(
"current_file_index_:", current_file_index_,
" >= filenames_.size():", dataset()->filenames_.size());
}
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
reader_ = std::make_unique<io::SequentialRecordReader>(
file_.get(), dataset()->options_);
if (!dataset()->byte_offsets_.empty()) {
TF_RETURN_IF_ERROR(
reader_->SeekOffset(dataset()->byte_offsets_[current_file_index_]));
}
return absl::OkStatus();
}
void ResetStreamsLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
reader_.reset();
file_.reset();
}
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_ TF_GUARDED_BY(mu_);
std::unique_ptr<io::SequentialRecordReader> reader_ TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const tstring compression_type_;
io::RecordReaderOptions options_;
const std::vector<int64_t> byte_offsets_;
const int op_version_;
};
TFRecordDatasetOp::TFRecordDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx),
op_version_(ctx->def().op() == kTFRecordDataset ? 1 : 2) {}
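// Parses the op inputs and constructs the dataset. When `buffer_size` is
// unspecified, a filesystem-dependent default is chosen (larger for GCS on
// Cloud TPU and for S3).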
void TFRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
bool is_gcs_fs = true;
bool is_s3_fs = true;
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
VLOG(2) << "Reading file: " << filenames_tensor->flat<tstring>()(i);
filenames.push_back(filenames_tensor->flat<tstring>()(i));
is_gcs_fs &= absl::StartsWith(filenames[i], kGcsFsPrefix);
is_s3_fs &= absl::StartsWith(filenames[i], kS3FsPrefix);
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
tstring compression_type;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
int64_t buffer_size = kUnspecifiedBufferSize;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx,
(buffer_size == kUnspecifiedBufferSize) || (buffer_size >= 0),
errors::InvalidArgument(
"`buffer_size` must be >= 0 (0 == no buffering)"));
std::vector<int64_t> byte_offsets;
if (op_version_ > 1) {
const Tensor* byte_offsets_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kByteOffsets, &byte_offsets_tensor));
OP_REQUIRES(ctx, byte_offsets_tensor->dims() <= 1,
absl::InvalidArgumentError(
"`byte_offsets` must be a scalar or a vector."));
OP_REQUIRES(ctx, byte_offsets_tensor->dims() == filenames_tensor->dims(),
absl::InvalidArgumentError(
"`byte_offsets` must be of same size as `filenames`"));
byte_offsets.reserve(byte_offsets_tensor->NumElements());
for (int i = 0; i < byte_offsets_tensor->NumElements(); ++i) {
byte_offsets.push_back(byte_offsets_tensor->flat<int64_t>()(i));
}
}
if (buffer_size == kUnspecifiedBufferSize) {
if (is_gcs_fs && is_cloud_tpu_gcs_fs() &&
buffer_size < kCloudTpuBlockSize) {
LOG_FIRST_N(WARNING, 1)
<< "User buffer size is too small for reading Cloud TPU "
<< "TFRecords stored in GCS. Overriding " << buffer_size
<< " to the minimum recommended buffer_size = " << kCloudTpuBlockSize;
buffer_size = kCloudTpuBlockSize;
} else if (is_s3_fs && buffer_size < kS3BlockSize) {
LOG_FIRST_N(WARNING, 1)
<< "User buffer size is too small for reading "
<< "TFRecords stored in S3. Overriding " << buffer_size
<< " to the minimum recommended buffer_size = " << kS3BlockSize;
buffer_size = kS3BlockSize;
} else {
LOG_FIRST_N(INFO, 1)
<< "TFRecordDataset `buffer_size` is unspecified, default to "
<< kDefaultBufferSize;
buffer_size = kDefaultBufferSize;
}
} else {
LOG_FIRST_N(INFO, 1)
<< "The default buffer size is " << kDefaultBufferSize
<< ", which is overridden by the user specified `buffer_size` of "
<< buffer_size;
}
*output = new Dataset(ctx, std::move(filenames), compression_type,
buffer_size, std::move(byte_offsets), op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TFRecordDataset").Device(DEVICE_CPU),
TFRecordDatasetOp);
REGISTER_KERNEL_BUILDER(Name("TFRecordDatasetV2").Device(DEVICE_CPU),
TFRecordDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tf_record_dataset_op.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tf_record_dataset";
constexpr char kOpVersion = 2;
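// Reads `index` records from `filename` and returns the byte offset just
// past the last record read; used to construct `byte_offsets` test inputs.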
int64_t GetOffset(const std::string& filename, int64_t index) {
Env* env_ = Env::Default();
std::unique_ptr<RandomAccessFile> file_;
std::unique_ptr<io::SequentialRecordReader> reader;
Status s1 = env_->NewRandomAccessFile(filename, &file_);
TF_CHECK_OK(s1) << s1;
reader = std::make_unique<io::SequentialRecordReader>(file_.get());
for (int i = 0; i < index; ++i) {
tstring record;
Status s2 = reader->ReadRecord(&record);
TF_CHECK_OK(s2) << s2;
}
return reader->TellOffset();
}
class TFRecordDatasetParams : public DatasetParams {
public:
TFRecordDatasetParams(std::vector<tstring> filenames,
CompressionType compression_type, int64_t buffer_size,
std::vector<int64_t> byte_offsets, string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
buffer_size_(buffer_size),
byte_offsets_(std::move(byte_offsets)) {
op_version_ = 2;
}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
int num_byte_offsets = byte_offsets_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<int64_t>(TensorShape({num_byte_offsets}), byte_offsets_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {
TFRecordDatasetOp::kFileNames,
TFRecordDatasetOp::kCompressionType,
TFRecordDatasetOp::kBufferSize,
TFRecordDatasetOp::kByteOffsets,
};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return TFRecordDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
CompressionType compression_type_;
int64_t buffer_size_;
std::vector<int64_t> byte_offsets_;
};
class TFRecordDatasetOpTest : public DatasetOpsTestBase {};
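// Writes each entry of `contents[i]` as a record into `filenames[i]` using
// the given compression type.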
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<std::vector<string>>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
for (int i = 0; i < filenames.size(); ++i) {
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
std::vector<absl::string_view> records(contents[i].begin(),
contents[i].end());
TF_RETURN_IF_ERROR(WriteDataToTFRecordFile(filenames[i], records, params));
}
return absl::OkStatus();
}
TFRecordDatasetParams TFRecordDatasetParams1() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_ZLIB_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_ZLIB_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams2() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_GZIP_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_GZIP_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams3() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
LOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams4() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_2"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_3")};
std::vector<std::vector<string>> contents = {
{"1", "22", "333"}, {"a", "bb", "ccc"}, {"x", "yy", "zzz"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
absl::Status status = CreateTestFiles(filenames, contents, compression_type);
TF_CHECK_OK(status) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ") << ": " << status;
std::vector<int64_t> byte_offsets = {};
byte_offsets.push_back(GetOffset(filenames[0], 0));
byte_offsets.push_back(GetOffset(filenames[1], 1));
byte_offsets.push_back(GetOffset(filenames[1], 2));
return TFRecordDatasetParams(filenames,
compression_type,
10, byte_offsets,
kNodeName);
}
TFRecordDatasetParams InvalidByteOffsets() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
absl::Status status = CreateTestFiles(filenames, contents, compression_type);
TF_CHECK_OK(status) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ") << ": " << status;
return TFRecordDatasetParams(filenames,
compression_type,
10, {1},
kNodeName);
}
std::vector<GetNextTestCase<TFRecordDatasetParams>> GetNextTestCases() {
return {
{TFRecordDatasetParams1(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams2(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams3(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams4(),
CreateTensors<tstring>(
TensorShape({}),
{{"1"}, {"22"}, {"333"}, {"bb"}, {"ccc"}, {"zzz"}})}};
}
ITERATOR_GET_NEXT_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<TFRecordDatasetParams>> SkipTestCases() {
return {{TFRecordDatasetParams1(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams1(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams1(),
7, 6},
{TFRecordDatasetParams2(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams2(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams2(),
7, 6},
{TFRecordDatasetParams3(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams3(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams3(),
7, 6}};
}
ITERATOR_SKIP_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
SkipTestCases())
TEST_F(TFRecordDatasetOpTest, DatasetNodeName) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TFRecordDatasetOpTest, DatasetTypeString) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = kOpVersion;
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TFRecordDatasetOp::kDatasetType, params)));
}
TEST_F(TFRecordDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(TFRecordDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(TFRecordDatasetOpTest, Cardinality) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(TFRecordDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(TFRecordDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(TFRecordDatasetOpTest, IteratorPrefix) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.op_version = kOpVersion;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TFRecordDatasetOp::kDatasetType, dataset_params.iterator_prefix(),
iterator_prefix_params)));
}
TEST_F(TFRecordDatasetOpTest, InvalidByteOffsetsToSeek) {
auto dataset_params = InvalidByteOffsets();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kDataLoss);
}
std::vector<IteratorSaveAndRestoreTestCase<TFRecordDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{TFRecordDatasetParams1(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams2(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams3(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tf_record_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tf_record_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55108a1b-fd1f-4fb5-954f-c1e85a9793f1 | cpp | tensorflow/tensorflow | zip_dataset_op | tensorflow/core/kernels/data/zip_dataset_op.cc | tensorflow/core/kernels/data/zip_dataset_op_test.cc | #include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ZipDatasetOp::kDatasetType;
constexpr const char* const ZipDatasetOp::kInputDatasets;
constexpr const char* const ZipDatasetOp::kOutputTypes;
constexpr const char* const ZipDatasetOp::kOutputShapes;
constexpr const char* const ZipDatasetOp::kNumInputDatasets;
constexpr char kInputImplsEmpty[] = "input_impls_empty";
class ZipDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx,
const std::vector<DatasetBase*>& inputs)
: DatasetBase(DatasetContext(ctx)), inputs_(inputs) {
for (const auto& input : inputs_) {
input->Ref();
for (DataType dt : input->output_dtypes()) {
output_dtypes_.push_back(dt);
}
output_shapes_.insert(output_shapes_.end(),
input->output_shapes().begin(),
input->output_shapes().end());
if (input != nullptr && random_indexing_compatible_.ok() &&
!input->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input->RandomIndexingCompatible();
}
}
}
~Dataset() override {
for (const auto& input : inputs_) {
input->Unref();
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
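  // The zipped cardinality is the smallest finite input cardinality; it is
  // unknown if any input is unknown and infinite only if every input is
  // infinite.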
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t result = kInfiniteCardinality;
for (const auto& input : inputs_) {
int64_t n = input->Cardinality(options);
if (n == kUnknownCardinality) {
return kUnknownCardinality;
}
if (n != kInfiniteCardinality &&
(result == kInfiniteCardinality || n < result)) {
result = n;
}
}
return result;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
for (const auto& input : inputs_) {
inputs->push_back(input);
}
return absl::OkStatus();
}
Status CheckExternalState() const override {
for (const auto& input : inputs_) {
TF_RETURN_IF_ERROR(input->CheckExternalState());
}
return absl::OkStatus();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->reserve(output_dtypes().size());
for (int i = 0; i < inputs_.size(); ++i) {
std::vector<Tensor> input_tensors;
TF_RETURN_IF_ERROR(inputs_[i]->Get(ctx, index, &input_tensors));
out_tensors->insert(out_tensors->end(), input_tensors.begin(),
input_tensors.end());
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> input_graph_nodes;
input_graph_nodes.reserve(inputs_.size());
for (const auto& input : inputs_) {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input, &input_node));
input_graph_nodes.emplace_back(input_node);
}
TF_RETURN_IF_ERROR(b->AddDataset(
this, {}, {std::make_pair(0, input_graph_nodes)}, {}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
input_impls_.resize(dataset()->inputs_.size());
for (size_t i = 0; i < input_impls_.size(); ++i) {
TF_RETURN_IF_ERROR(dataset()->inputs_[i]->MakeIterator(
&input_contexts_[i], this, strings::StrCat(prefix(), "[", i, "]"),
&input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (input_impls_.empty()) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->reserve(dataset()->output_dtypes().size());
Status status = absl::OkStatus();
*end_of_sequence = false;
if (TF_PREDICT_FALSE(ctx->index_mapper() && !input_contexts_.empty() &&
input_contexts_.back().index_mapper() == nullptr)) {
for (IteratorContext& input_context : input_contexts_) {
input_context.SetIndexMapper(ctx->index_mapper());
}
}
for (int i = 0; i < input_impls_.size(); ++i) {
const auto& input_impl = input_impls_[i];
std::vector<Tensor> input_tensors;
bool component_end_of_sequence = false;
status.Update(input_impl->GetNext(&input_contexts_[i], &input_tensors,
&component_end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
*end_of_sequence |= component_end_of_sequence;
if (!status.ok()) {
continue;
}
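        // Once any component reaches end of sequence, still advance the
        // remaining input iterators so their checkpoint state gets merged
        // before bailing out.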
if (*end_of_sequence) {
for (int j = i + 1; j < input_impls_.size(); ++j) {
Status s =
input_impls_[j]->GetNext(&input_contexts_[j], &input_tensors,
&component_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[j].checkpoint());
}
break;
}
out_tensors->insert(out_tensors->end(), input_tensors.begin(),
input_tensors.end());
}
if (*end_of_sequence || !status.ok()) {
out_tensors->clear();
}
if (*end_of_sequence) {
input_impls_.clear();
}
return status;
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputImplsEmpty,
static_cast<int64_t>(input_impls_.empty())));
for (auto& input_impl : input_impls_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t inputs_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplsEmpty, &inputs_empty));
if (ctx->restored_element_count()) {
if (input_impls_.size() != dataset()->inputs_.size()) {
return absl::FailedPreconditionError(
"`Initialize` should be called before restoring from the "
"checkpoint.");
}
if (ctx->index_mapper() == nullptr) {
return absl::FailedPreconditionError(
"ctx->index_mapper() should be provided along with "
"ctx->restored_element_count() when restoring.");
}
if (static_cast<bool>(inputs_empty)) {
input_impls_.clear();
} else {
for (int i = 0; i < input_impls_.size(); ++i) {
input_contexts_[i].set_restored_element_count(
ctx->restored_element_count().value());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[i], reader, input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
}
}
return absl::OkStatus();
}
if (static_cast<bool>(inputs_empty)) {
input_impls_.clear();
} else {
DCHECK_EQ(input_impls_.size(), dataset()->inputs_.size());
for (auto& input_impl : input_impls_)
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl));
}
return absl::OkStatus();
}
private:
mutex mu_;
std::vector<std::unique_ptr<IteratorBase>> input_impls_ TF_GUARDED_BY(mu_);
std::vector<IteratorContext> input_contexts_ TF_GUARDED_BY(mu_);
};
const std::vector<DatasetBase*> inputs_;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
ZipDatasetOp::ZipDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {}
void ZipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
std::vector<DatasetBase*> inputs;
for (size_t i = 0; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
inputs.push_back(input);
}
*output = new Dataset(ctx, inputs);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ZipDataset").Device(DEVICE_CPU), ZipDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "zip_dataset";
class ZipDatasetParams : public DatasetParams {
public:
template <typename T>
ZipDatasetParams(std::vector<T> input_dataset_params,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_input_datasets, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_input_datasets_(num_input_datasets) {
for (auto& params : input_dataset_params) {
input_dataset_params_.push_back(std::make_unique<T>(params));
}
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params[0].dataset_type(),
input_dataset_params[0].iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
for (int i = 0; i < num_input_datasets_; ++i) {
input_names->emplace_back(
absl::StrCat(ZipDatasetOp::kDatasetType, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("N", num_input_datasets_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ZipDatasetOp::kDatasetType; }
private:
int32 num_input_datasets_;
};
class ZipDatasetOpTest : public DatasetOpsTestBase {};
ZipDatasetParams ZipDatasetParams1() {
return ZipDatasetParams(
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
      /*num_input_datasets=*/2,
kNodeName);
}
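// The second input range is longer than the first; zipping truncates to the
// shorter input, so both parameter sets yield the same three element pairs.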
ZipDatasetParams ZipDatasetParams2() {
return ZipDatasetParams(
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 15, 1)},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
      /*num_input_datasets=*/2,
kNodeName);
}
std::vector<GetNextTestCase<ZipDatasetParams>> GetNextTestCases() {
return {{ZipDatasetParams1(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_GET_NEXT_TEST_P(ZipDatasetOpTest, ZipDatasetParams, GetNextTestCases())
TEST_F(ZipDatasetOpTest, DatasetNodeName) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ZipDatasetOpTest, DatasetTypeString) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ZipDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ZipDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<ZipDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ZipDatasetParams>> CardinalityTestCases() {
return {{ZipDatasetParams1(),
3},
{ZipDatasetParams2(),
3}};
}
DATASET_CARDINALITY_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ZipDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<ZipDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ZipDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ZipDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ZipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ZipDatasetParams1(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/zip_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/zip_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
550b3692-2d88-4054-bf6c-3b1158ebeda3 | cpp | tensorflow/tensorflow | filter_dataset_op | tensorflow/core/kernels/data/filter_dataset_op.cc | tensorflow/core/kernels/data/filter_dataset_op_test.cc | #include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const FilterDatasetOp::kDatasetType;
constexpr const char* const FilterDatasetOp::kInputDataset;
constexpr const char* const FilterDatasetOp::kOtherArguments;
constexpr const char* const FilterDatasetOp::kPredicate;
constexpr const char* const FilterDatasetOp::kTarguments;
constexpr const char* const FilterDatasetOp::kOutputTypes;
constexpr const char* const FilterDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kFilteredElements[] = "filtered_elements";
constexpr char kDroppedElements[] = "dropped_elements";
class FilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kPredicate, f}, {kTarguments, other_arguments_types_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
filtered_elements_(0),
dropped_elements_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
auto stats_aggregator = ctx->stats_aggregator();
bool matched;
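      // Keep pulling elements from the input until the predicate returns true
      // for one of them or the input is exhausted.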
do {
{
tf_shared_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
}
if (*end_of_sequence) {
mutex_lock l(mu_);
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
auto status = instantiated_captured_func_->RunWithBorrowedArgs(
ctx, *out_tensors, &result, model_node());
if (!status.ok()) {
return AddErrorContext(status);
}
if (result.size() != 1 || result[0].dtype() != DT_BOOL ||
result[0].NumElements() != 1) {
out_tensors->clear();
return errors::InvalidArgument(
"Filter predicate `f` must return a scalar bool.");
}
matched = result[0].scalar<bool>()();
if (!matched) {
out_tensors->clear();
{
mutex_lock l(mu_);
dropped_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::DroppedElementsScalarName(dataset()->node_name()),
static_cast<float>(dropped_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kDroppedElements,
static_cast<float>(1));
}
}
} while (!matched);
{
mutex_lock l(mu_);
filtered_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::FilterdElementsScalarName(dataset()->node_name()),
static_cast<float>(filtered_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kFilteredElements,
static_cast<float>(1));
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeUnknownRatioNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kFilteredElements, filtered_elements_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kDroppedElements, dropped_elements_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
} else {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kFilteredElements, &filtered_elements_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kDroppedElements, &dropped_elements_));
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
tf_shared_lock l(mu_);
data::TraceMeMetadata result;
result.push_back(std::make_pair(
"passed",
strings::Printf("%lld", static_cast<long long>(filtered_elements_))));
result.push_back(std::make_pair(
"filtered",
strings::Printf("%lld", static_cast<long long>(dropped_elements_))));
return result;
}
private:
mutable mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t filtered_elements_ TF_GUARDED_BY(mu_);
int64_t dropped_elements_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
FilterDatasetOp::FilterDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
&func_metadata_));
OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
errors::InvalidArgument(
"predicate function has more than one return value."));
}
void FilterDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FilterDataset").Device(DEVICE_CPU),
FilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "filter_dataset";
class FilterDatasetParams : public DatasetParams {
public:
template <typename T>
FilterDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper pred_func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
pred_func_(std::move(pred_func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size());
input_names->emplace_back(FilterDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FilterDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"predicate", pred_func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return FilterDatasetOp::kDatasetType; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper pred_func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FilterDatasetOpTest : public DatasetOpsTestBase {};
FilterDatasetParams FilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
FilterDatasetParams FilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
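// The parameter sets below wire in predicate functions whose output is not a
// single scalar bool, so iteration is expected to fail with InvalidArgument.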
FilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("GetUnique",
{{"T", DT_INT64}, {"out_idx", DT_INT32}}),
{test::function::Unique()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<FilterDatasetParams>> GetNextTestCases() {
return {{FilterDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
GetNextTestCases())
TEST_F(FilterDatasetOpTest, DatasetNodeName) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FilterDatasetOpTest, DatasetTypeString) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FilterDatasetOp::kDatasetType)));
}
TEST_F(FilterDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<FilterDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<FilterDatasetParams>> CardinalityTestCases() {
return {{FilterDatasetParams1(),
kUnknownCardinality},
{FilterDatasetParams2(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
CardinalityTestCases())
TEST_F(FilterDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<FilterDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(FilterDatasetOpTest, IteratorPrefix) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FilterDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FilterDatasetParams1(),
{0, 2, 6},
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{0, 2, 6},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidPredicateFuncTest
: public FilterDatasetOpTest,
public ::testing::WithParamInterface<FilterDatasetParams> {};
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
auto dataset_params = GetParam();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
FilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
InvalidPredFuncFilterDatasetParams2(),
InvalidPredFuncFilterDatasetParams3()}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/filter_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/filter_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c00b722e-3972-4f34-b0cf-754b2294e5cd | cpp | tensorflow/tensorflow | shuffle_dataset_op | tensorflow/core/kernels/data/shuffle_dataset_op.cc | tensorflow/core/kernels/data/shuffle_dataset_op_test.cc | #include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <cstdint>
#include <deque>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShuffleDatasetOpBase::kInputDataset;
constexpr const char* const ShuffleDatasetOpBase::kBufferSize;
constexpr const char* const ShuffleDatasetOpBase::kSeed;
constexpr const char* const ShuffleDatasetOpBase::kSeed2;
constexpr const char* const ShuffleDatasetOpBase::kOutputTypes;
constexpr const char* const ShuffleDatasetOpBase::kOutputShapes;
constexpr const char* const
ShuffleDatasetOpBase::kReshuffleEachIteration;
constexpr const char* const ShuffleDatasetOp::kDatasetType;
constexpr const char* const
ShuffleAndRepeatDatasetOp::kDatasetType;
constexpr const char* const ShuffleAndRepeatDatasetOp::kCount;
const int64_t kLogIntervalMicros = 10 * 1000000;
const int64_t kMaxEpochsInBuffer = 3;
constexpr char kNumRandomSamples[] = "num_random_samples";
constexpr char kDataProduced[] = "data_produced";
constexpr char kEndOfInputSequence[] = "end_of_input_sequence";
constexpr char kEpoch[] = "epoch";
constexpr char kNumElements[] = "num_elements";
constexpr char kSlicesSize[] = "slices_size";
constexpr char kSlicesStart[] = "slices_start";
constexpr char kSlicesEnd[] = "slices_end";
constexpr char kSlicesReachedEndOfSequence[] = "slices_reached_end_of_sequence";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kShuffleDatasetV1[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kShuffleAndRepeatDatasetV1[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
ShuffleDatasetOpBase::ShuffleDatasetOpBase(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
class ShuffleDatasetOpBase::ShuffleDatasetBase : public DatasetBase {
public:
ShuffleDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
int64_t buffer_size,
std::shared_ptr<SeedGenerator> seed_generator,
int64_t count)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
seed_generator_(std::move(seed_generator)),
count_(count),
traceme_metadata_(
{{"buffer_size",
strings::Printf("%lld", static_cast<long long>(buffer_size))}}) {
input_->Ref();
}
~ShuffleDatasetBase() override { input_->Unref(); }
virtual string op_type() const = 0;
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (count_ == -1 || input_->Cardinality(options) == kInfiniteCardinality) {
return kInfiniteCardinality;
} else if (input_->Cardinality(options) == kUnknownCardinality) {
return kUnknownCardinality;
} else {
return input_->Cardinality(options) * count_;
}
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
{
mutex_lock l(mu_);
if (shuffled_indices_.empty()) {
InitializeRandomAccessIndices();
}
}
int64 shuffled_index;
{
tf_shared_lock l(mu_);
shuffled_index = shuffled_indices_[index];
}
TF_RETURN_IF_ERROR(input_->Get(ctx, shuffled_index, out_tensors));
return absl::OkStatus();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(buffer_size_, seed_generator_->seed(),
seed_generator_->seed2(), count_);
return name_utils::DatasetDebugString(op_type(), params);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, name_utils::IteratorPrefix(op_type(), prefix)},
seed_generator_.get());
}
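  // Precomputes a random permutation of [0, cardinality) via a Fisher-Yates
  // shuffle seeded from the dataset's seed pair, so random-access Get() calls
  // are deterministic for a given seed.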
void InitializeRandomAccessIndices() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const int64 cardinality = Cardinality();
shuffled_indices_ = std::vector<std::int64_t>(cardinality);
std::iota(shuffled_indices_.begin(), shuffled_indices_.end(), 0);
int64_t shuffled_index = 0;
random::PhiloxRandom parent_generator =
random::PhiloxRandom(seed_generator_->seed(), seed_generator_->seed2());
random::SingleSampleAdapter<random::PhiloxRandom> generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
while (shuffled_index < cardinality) {
int64_t offset = generator() % (cardinality - shuffled_index);
std::swap(shuffled_indices_[shuffled_index + offset],
shuffled_indices_[shuffled_index]);
shuffled_index += 1;
}
}
protected:
class Iterator : public DatasetIterator<ShuffleDatasetBase> {
public:
explicit Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<ShuffleDatasetBase>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator->seed(), seed_generator->seed2()),
generator_(&parent_generator_) {
if (params.dataset->buffer_size_ == kUnknownCardinality) {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
} else {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>(
params.dataset->buffer_size_);
}
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
if (ctx->symbolic_checkpoint()) {
for (int64_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(FillBuffer(ctx));
if (num_elements_ == 0) {
DCHECK(input_impl_ == nullptr);
*end_of_sequence = true;
return absl::OkStatus();
}
*end_of_sequence = false;
ClearEmptySlices();
DCHECK(!slices_.empty());
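      // Sample uniformly from the oldest (front) slice, emit that element, and
      // swap the slice's first element into the vacated buffer position so the
      // slice can shrink from the front.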
int64_t offset =
Random() % (slices_.front()->end - slices_.front()->start);
int64_t index = (slices_.front()->start + offset) % buffer_->size();
*out_tensors = std::move(buffer_->at(index));
this->RecordBufferDequeue(ctx, *out_tensors);
std::swap(buffer_->at(index),
buffer_->at(slices_.front()->start % buffer_->size()));
checkpoint_indices_.insert(index);
checkpoint_indices_.insert(slices_.front()->start % buffer_->size());
slices_.front()->start++;
num_elements_--;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kEpochNumRandomSamples,
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumRandomSamples,
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed, seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed2, seed2_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kEndOfInputSequence, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(this->SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kEpoch, epoch_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNumElements, num_elements_));
const std::string key_prefix = absl::StrCat(prefix(), kColon, "buffer");
if (ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(UpdateCheckpointElements(
writer, key_prefix, *buffer_, checkpoint_indices_));
checkpoint_indices_.clear();
} else {
TF_RETURN_IF_ERROR(
WriteElementsToCheckpoint(writer, key_prefix, *buffer_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kSlicesSize, slices_.size()));
for (size_t i = 0; i < slices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"),
slices_[i]->start));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
slices_[i]->end));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
static_cast<int64_t>(slices_[i]->reached_end_of_sequence)));
}
if (data_produced_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->prefix(), kDataProduced, ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kEpochNumRandomSamples,
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumRandomSamples,
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed, &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed2, &seed2_));
ResetRngs();
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kEndOfInputSequence, &input_empty));
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
TF_RETURN_IF_ERROR(this->RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(this->prefix(), kEpoch, &epoch_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kNumElements, &num_elements_));
size_t slices_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kSlicesSize, &temp));
slices_size = static_cast<size_t>(temp);
}
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
TF_RETURN_IF_ERROR(ReadElementsFromCheckpoint(
ctx, reader, absl::StrCat(prefix(), kColon, "buffer"),
buffer_.get()));
if (ctx->symbolic_checkpoint()) {
DCHECK(checkpoint_indices_.empty());
for (size_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
for (const auto& element : *buffer_) {
RecordBufferEnqueue(ctx, element);
}
if (!IsShuffleAll()) {
buffer_->resize(dataset()->buffer_size_);
}
slices_.clear();
for (size_t i = 0; i < slices_size; ++i) {
int64_t start;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(),
absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"), &start));
int64_t end;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
&end));
int64_t reached_end_of_sequence;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
&reached_end_of_sequence));
slices_.push_back(std::make_unique<Slice>(
start, end, static_cast<bool>(reached_end_of_sequence)));
}
data_produced_ = reader->Contains(this->prefix(), kDataProduced);
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return this->dataset()->traceme_metadata_;
}
private:
struct Slice {
Slice(int64_t start, int64_t end, bool reached_end_of_sequence)
: start(start),
end(end),
reached_end_of_sequence(reached_end_of_sequence) {}
int64_t start;
int64_t end;
bool reached_end_of_sequence = false;
};
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
bool IsServingSliceComplete() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (auto& slice : slices_) {
if (slice->start != slice->end) {
return slice->reached_end_of_sequence;
}
}
return false;
}
bool IsShuffleAll() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dataset()->buffer_size_ == kUnknownCardinality;
}
Status FillBuffer(IteratorContext* ctx) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t start_micros = EnvTime::NowMicros();
int64_t num_log_entries = 0;
while (ShouldFillBuffer()) {
if (EnvTime::NowMicros() >
((num_log_entries + 1) * kLogIntervalMicros) + start_micros) {
num_log_entries++;
LOG_EVERY_N_SEC(INFO, 10)
<< dataset()->metadata().name() << ": "
<< "Filling up shuffle buffer (this may take a while): "
<< num_elements_ << " of " << BufferSizeString();
}
if (!input_impl_) {
TF_RETURN_IF_ERROR(PrepareNextEpoch(ctx));
}
std::vector<Tensor> input_element;
bool end_of_input_sequence = false;
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, &input_element, &end_of_input_sequence));
if (end_of_input_sequence) {
slices_.back()->reached_end_of_sequence = true;
}
if (!end_of_input_sequence) {
AddToShuffleBuffer(ctx, std::move(input_element));
continue;
}
input_impl_.reset();
if (ctx->split_providers().empty() && !data_produced_ &&
this->dataset()->count_ == -1) {
return absl::OkStatus();
}
}
if (num_log_entries > 0) {
LOG(INFO) << "Shuffle buffer filled.";
}
return absl::OkStatus();
}
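    // Filling stops once the buffer is full, the requested number of epochs
    // has been exhausted, or more than kMaxEpochsInBuffer epochs are already
    // buffered; with an unknown buffer size the buffer grows until the current
    // epoch finishes.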
bool ShouldFillBuffer() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!input_impl_ && dataset()->count_ != -1 &&
epoch_ >= dataset()->count_) {
return false;
}
if (slices_.size() > kMaxEpochsInBuffer && num_elements_ > 0) {
return false;
}
if (IsShuffleAll() && (slices_.empty() || !IsServingSliceComplete())) {
return true;
}
return num_elements_ < buffer_->size();
}
Status PrepareNextEpoch(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (epoch_ == 0) {
slices_.push_back(std::make_unique<Slice>(0, 0, false));
} else {
int64_t n = slices_.back()->end;
slices_.push_back(std::make_unique<Slice>(n, n, false));
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
}
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
epoch_++;
return absl::OkStatus();
}
void AddToShuffleBuffer(IteratorContext* ctx, std::vector<Tensor>&& element)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
data_produced_ = true;
if (num_elements_ == 0) {
VLOG(1) << "Starting to fill up shuffle buffer of size: "
<< BufferSizeString();
}
this->RecordBufferEnqueue(ctx, element);
if (num_elements_ == buffer_->size()) {
DCHECK(IsShuffleAll());
checkpoint_indices_.insert(buffer_->size());
buffer_->push_back(element);
} else {
size_t index = slices_.back()->end % buffer_->size();
checkpoint_indices_.insert(index);
buffer_->at(index) = std::move(element);
}
num_elements_++;
slices_.back()->end++;
}
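    // Drops slices whose elements have all been served and reseeds the RNG so
    // the next epoch is shuffled with fresh seeds.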
void ClearEmptySlices() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
while (slices_.front()->start == slices_.front()->end) {
slices_.pop_front();
num_random_samples_ = 0;
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
}
}
std::string BufferSizeString() {
return absl::StrCat(dataset()->buffer_size_);
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
std::unique_ptr<std::vector<std::vector<Tensor>>> buffer_
TF_GUARDED_BY(mu_);
absl::flat_hash_set<int64_t> checkpoint_indices_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_) = nullptr;
int64_t epoch_ TF_GUARDED_BY(mu_) = 0;
int64_t num_elements_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
std::deque<std::unique_ptr<Slice>> slices_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
bool data_produced_ TF_GUARDED_BY(mu_) = false;
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const std::shared_ptr<SeedGenerator> seed_generator_;
const int64_t count_;
const TraceMeMetadata traceme_metadata_;
mutable mutex mu_;
mutable std::vector<std::int64_t> shuffled_indices_ TF_GUARDED_BY(mu_);
};
class ShuffleDatasetOp::Dataset : public ShuffleDatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~Dataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
AttrValue reshuffle_each_iteration;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, buffer_size_node, seed_node, seed2_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
class ShuffleDatasetOp::DatasetV2 : public ShuffleDatasetBase {
public:
DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()) {}
~DatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, buffer_size_node, resource_handle_node},
{},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
};
class ShuffleDatasetOp::DatasetV3 : public ShuffleDatasetBase {
public:
DatasetV3(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~DatasetV3() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, buffer_size_node, seed_node,
seed2_node, resource_handle_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
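// ShuffleDataset has three op versions: V1 takes buffer_size/seed/seed2 as
// inputs, V2 takes buffer_size plus a seed-generator resource handle, and V3
// takes both the seeds and the resource handle.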
ShuffleDatasetOp::ShuffleDatasetOp(OpKernelConstruction* ctx)
: ShuffleDatasetOpBase(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kShuffleDatasetV3) {
op_version_ = 3;
} else if (op_name == kShuffleDatasetV2) {
op_version_ = 2;
} else if (op_name == kShuffleDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kReshuffleEachIteration)) {
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kReshuffleEachIteration, &reshuffle_each_iteration_));
}
}
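// For V2/V3 the seed-generator resource named by the input handle may not
// exist (e.g. the handle was produced elsewhere). In that case the kernel
// creates a SeedGeneratorManager under a fresh per-kernel name and the dataset
// takes ownership so it can delete the resource when it is destroyed.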
void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size > 0 || buffer_size == kUnknownCardinality,
errors::InvalidArgument(
"buffer_size must be greater than zero or UNKNOWN_CARDINALITY"));
int64_t count = 1;
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
if (op_version_ == 3) {
auto handle = HandleFromInput(ctx, 4);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
RandomSeeds seeds(seed, seed2);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new ShuffleDatasetOp::DatasetV3(ctx, input, buffer_size, count,
std::move(seeds), manager,
std::move(handle), owns_resource);
} else if (op_version_ == 2) {
auto handle = HandleFromInput(ctx, 2);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
LOG(WARNING) << "Failed to find seed generator resource. Falling back to "
"using a non-deterministically seeded generator and "
"reshuffling each iteration.";
RandomSeeds seeds(0, 0);
OP_REQUIRES_OK(
ctx, ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[&seeds](SeedGeneratorManager** manager) {
*manager = new SeedGeneratorManager(
new RandomSeedGenerator(seeds));
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output =
new ShuffleDatasetOp::DatasetV2(ctx, input, buffer_size, count, manager,
std::move(handle), owns_resource);
} else {
if (op_version_ != 1) {
LOG(WARNING) << "Unsupported version of shuffle dataset op: "
<< op_version_ << ". Defaulting to version 1.";
}
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
RandomSeeds seeds(seed, seed2);
SeedGeneratorManager* manager;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
*output = new ShuffleDatasetOp::Dataset(ctx, input, buffer_size, count,
std::move(seeds), manager,
std::move(handle));
}
}
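// Legacy ShuffleAndRepeatDataset (V1): the kernel always creates the seed
// generator for this dataset, so the destructor deletes the resource
// unconditionally.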
class ShuffleAndRepeatDatasetOp::Dataset : public ShuffleDatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
RandomSeeds&& seeds, SeedGeneratorManager* manager, int64_t count,
ResourceHandle&& resource_handle)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~Dataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size = nullptr;
Node* seed = nullptr;
Node* seed2 = nullptr;
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2));
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_graph_node, buffer_size, seed, seed2, count},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
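// DatasetV2 takes the seed-generator resource handle as an input (mirroring
// ShuffleDatasetV3) and deletes the resource only if this dataset created it.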
class ShuffleAndRepeatDatasetOp::DatasetV2 : public ShuffleDatasetBase {
public:
DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~DatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
Node* count_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, buffer_size_node, seed_node,
seed2_node, count_node, resource_handle_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
ShuffleAndRepeatDatasetOp::ShuffleAndRepeatDatasetOp(OpKernelConstruction* ctx)
: ShuffleDatasetOpBase(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kShuffleAndRepeatDatasetV2) {
op_version_ = 2;
} else if (op_name == kShuffleAndRepeatDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kReshuffleEachIteration)) {
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kReshuffleEachIteration, &reshuffle_each_iteration_));
}
}
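// Parses buffer_size, the seeds and count (count must be positive or -1 for
// infinite repetition), then reuses or creates the seed generator the same way
// ShuffleDatasetOp::MakeDataset does.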
void ShuffleAndRepeatDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size > 0 || buffer_size == kUnknownCardinality,
errors::InvalidArgument(
"buffer_size must be greater than zero or UNKNOWN_CARDINALITY"));
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
OP_REQUIRES(ctx, count > 0 || count == -1,
errors::InvalidArgument(
"count must be greater than zero or equal to -1."));
RandomSeeds seeds(seed, seed2);
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
if (op_version_ == 2) {
auto handle = HandleFromInput(ctx, 5);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new ShuffleAndRepeatDatasetOp::DatasetV2(
ctx, input, buffer_size, count, std::move(seeds), manager,
std::move(handle), owns_resource);
} else {
if (op_version_ != 1) {
LOG(WARNING) << "Unsupported version of shuffle dataset op: "
<< op_version_ << ". Defaulting to version 1.";
}
SeedGeneratorManager* manager;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
*output = new Dataset(ctx, input, buffer_size, std::move(seeds), manager,
count, std::move(handle));
}
}
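// CPU kernel registrations. (tf.data's Python shuffle/shuffle_and_repeat
// transformations lower to these ops; which version gets emitted depends on
// the TF release and the requested seed/reshuffle behavior, so treat that
// mapping as approximate.)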
namespace {
REGISTER_KERNEL_BUILDER(Name("ShuffleDataset").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleDatasetV2").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleDatasetV3").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDataset").Device(DEVICE_CPU),
ShuffleAndRepeatDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDatasetV2").Device(DEVICE_CPU),
ShuffleAndRepeatDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kShuffleNodeName[] = "shuffle_dataset";
constexpr char kShuffleAndRepeatNodeName[] = "shuffle_and_repeat_dataset";
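// Shared test-parameter helper: a count of 1 exercises ShuffleDataset, any
// other count exercises ShuffleAndRepeatDataset (which takes an extra `count`
// input and uses a different node name).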
class ShuffleDatasetParams : public DatasetParams {
public:
template <typename T>
ShuffleDatasetParams(T input_dataset_params, int64_t buffer_size,
int64_t seed, int64_t seed2, int64_t count,
bool reshuffle_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
seed_(seed),
seed2_(seed2),
count_(count),
reshuffle_each_iteration_(reshuffle_each_iteration) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = {
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<int64_t>(TensorShape({}), {seed_}),
CreateTensor<int64_t>(TensorShape({}), {seed2_})};
if (count_ != 1) {
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {count_}));
}
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShuffleDatasetOpBase::kInputDataset);
input_names->emplace_back(ShuffleDatasetOpBase::kBufferSize);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed2);
if (count_ != 1) {
input_names->emplace_back(ShuffleAndRepeatDatasetOp::kCount);
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("reshuffle_each_iteration",
reshuffle_each_iteration_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
if (count_ != 1) {
return ShuffleAndRepeatDatasetOp::kDatasetType;
}
return ShuffleDatasetOp::kDatasetType;
}
int64_t count() const { return count_; }
private:
int64_t buffer_size_;
int64_t seed_;
int64_t seed2_;
int64_t count_;
bool reshuffle_each_iteration_;
};
class ShuffleDatasetOpTest : public DatasetOpsTestBase {};
ShuffleDatasetParams ShuffleDatasetParams1() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
3,
1,
2,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams2() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams3() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
2,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams4() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
2,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams5() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
1,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams6() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
10,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams7() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
1,
2,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams8() {
return ShuffleDatasetParams(RangeDatasetParams(0, 3, 1),
10,
1,
2,
-1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithUnknownCardinality() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
-2,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithInvalidBufferSize() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
-1,
1,
2,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidBufferSize() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
-1,
1,
2,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidCount() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
10,
1,
2,
0,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<Tensor> expected_shuffle_outputs;
std::vector<Tensor> expected_reshuffle_outputs;
};
std::vector<GetNextTestCase<ShuffleDatasetParams>> GetNextTestCases() {
return {
{ShuffleDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})},
{ShuffleDatasetParams3(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {0}, {2}, {3}, {4}, {5}, {6}, {7}, {9}, {8}})},
{ShuffleDatasetParams4(),
CreateTensors<int64_t>(
TensorShape({}), {{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{4}, {6}, {9}, {0}, {1}, {8}, {2}, {7}, {3}, {5}})},
{ShuffleDatasetParams5(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{ShuffleDatasetParams6(),
{},
{}},
{ShuffleDatasetParams7(),
CreateTensors<int64_t>(
TensorShape({}), {{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}}),
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})}};
}
class ParameterizedGetNextTest : public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ShuffleDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> shuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
shuffled_out_tensors.insert(shuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
shuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
end_of_sequence = false;
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
std::vector<Tensor> reshuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
reshuffled_out_tensors.insert(reshuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
reshuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
TF_EXPECT_OK(ExpectEqual(shuffled_out_tensors,
test_case.expected_shuffle_outputs,
true));
TF_EXPECT_OK(ExpectEqual(reshuffled_out_tensors,
test_case.expected_reshuffle_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
std::vector<DatasetNodeNameTestCase<ShuffleDatasetParams>>
DatasetNodeNameTestCases() {
return {{ShuffleDatasetParams1(),
kShuffleNodeName},
{ShuffleDatasetParams7(),
kShuffleAndRepeatNodeName}};
}
DATASET_NODE_NAME_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetNodeNameTestCases())
std::vector<DatasetTypeStringTestCase<ShuffleDatasetParams>>
DatasetTypeStringTestCases() {
return {{ShuffleDatasetParams1(),
name_utils::OpName(
ShuffleDatasetOp::kDatasetType)},
{ShuffleDatasetParams7(),
name_utils::OpName(ShuffleAndRepeatDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetTypeStringTestCases())
TEST_F(ShuffleDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<ShuffleDatasetParams>> CardinalityTestCases() {
return {{ShuffleDatasetParams1(),
10},
{ShuffleDatasetParams2(),
10},
{ShuffleDatasetParams3(),
10},
{ShuffleDatasetParams4(),
10},
{ShuffleDatasetParams5(),
10},
{ShuffleDatasetParams6(),
0},
{ShuffleDatasetParams7(),
20},
{ShuffleDatasetParams8(),
kInfiniteCardinality},
{ShuffleDatasetParamsWithUnknownCardinality(),
10}};
}
DATASET_CARDINALITY_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
CardinalityTestCases())
TEST_F(ShuffleDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShuffleDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<Tensor> expected_shuffle_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ShuffleDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})},
{ShuffleDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}})},
{ShuffleDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}})},
{ShuffleDatasetParams5(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{ShuffleDatasetParams6(),
{0, 4, 11},
{}},
{ShuffleDatasetParams7(),
{0, 5, 22},
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
{0, 5, 20},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_shuffle_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(ShuffleDatasetOpTest, InvalidArguments) {
std::vector<ShuffleDatasetParams> dataset_params_vec(
{ShuffleDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidCount()});
for (const auto& dataset_params : dataset_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shuffle_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f88f8392-47f6-4a36-91b7-14325d137a41 | cpp | tensorflow/tensorflow | repeat_dataset_op | tensorflow/core/kernels/data/repeat_dataset_op.cc | tensorflow/core/kernels/data/repeat_dataset_op_test.cc | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const RepeatDatasetOp::kDatasetType;
constexpr const char* const RepeatDatasetOp::kInputDataset;
constexpr const char* const RepeatDatasetOp::kCount;
constexpr const char* const RepeatDatasetOp::kOutputTypes;
constexpr const char* const RepeatDatasetOp::kOutputShapes;
namespace {
constexpr char kForeverRepeat[] = "ForeverRepeat";
constexpr char kEmptyRepeat[] = "EmptyRepeat";
constexpr char kFiniteRepeat[] = "FiniteRepeat";
constexpr char kCurIteration[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kUninitialized[] = "uninitialized";
constexpr int64_t kKnownRatio = 1;
std::string nested_prefix(const std::string& prefix, int64_t epoch) {
return strings::StrCat(prefix, "[", epoch, "]");
}
bool HasDataServiceInput(const DatasetBase* dataset) {
DCHECK(dataset != nullptr);
if (absl::StartsWith(dataset->type_string(), "DataServiceDataset")) {
return true;
}
std::vector<const DatasetBase*> inputs;
Status s = dataset->InputDatasets(&inputs);
if (!s.ok()) {
return false;
}
for (const DatasetBase* input : inputs) {
if (HasDataServiceInput(input)) {
return true;
}
}
return false;
}
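// Wraps the input's split provider so the reported cardinality reflects the
// repeat count; splits are passed through unchanged and the repeat iterators
// call Reset() on the provider at the end of each epoch.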
class RepeatedSplitProvider : public SplitProvider {
public:
explicit RepeatedSplitProvider(std::unique_ptr<SplitProvider> split_provider,
int64_t count)
: split_provider_(std::move(split_provider)), count_(count) {}
int64_t Cardinality() const override {
if (split_provider_->Cardinality() == 0 || count_ == 0) {
return 0;
}
if (count_ < 0) {
return kInfiniteCardinality;
}
if (split_provider_->Cardinality() < 0) {
return split_provider_->Cardinality();
}
return split_provider_->Cardinality() * count_;
}
absl::Status GetNext(Tensor* split, bool* end_of_splits) override {
return split_provider_->GetNext(split, end_of_splits);
}
absl::Status Reset() override { return split_provider_->Reset(); }
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override {
return split_provider_->Save(full_name, writer);
}
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override {
return split_provider_->Restore(full_name, reader);
}
void Cancel() override { split_provider_->Cancel(); }
private:
const std::unique_ptr<SplitProvider> split_provider_;
const int64_t count_;
};
}
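// Chooses an iterator from the repeat count: EmptyIterator for 0,
// FiniteIterator for a positive count, ForeverIterator for a negative count
// (infinite repeat). Random access is only supported for a positive count and
// maps an index back to the input via index % input cardinality.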
class RepeatDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else if (count <= 0) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("`repeat(", count,
")` does not support random access of tf.data "
"datasets."));
} else {
random_indexing_compatible_ = absl::OkStatus();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<ForeverIterator>(ForeverIterator::Params{
this, name_utils::IteratorPrefix(kForeverRepeat, prefix)});
} else if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyRepeat, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteRepeat, prefix)});
}
}
absl::Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
std::vector<std::unique_ptr<SplitProvider>> input_split_providers;
TF_RETURN_IF_ERROR(input_->MakeSplitProviders(&input_split_providers));
split_providers->clear();
for (auto& split_provider : input_split_providers) {
split_providers->push_back(std::make_unique<RepeatedSplitProvider>(
std::move(split_provider), count_));
}
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(RepeatDatasetOp::kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (count_ < 0) {
if (n == 0) {
return 0;
}
return kInfiniteCardinality;
}
if (count_ == 0) {
return 0;
}
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ * n;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index % input_->Cardinality(), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
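  // Repeats the input `count_` times by re-creating the input iterator for
  // each epoch under a per-epoch prefix ("<prefix>[i]") and resetting any
  // split providers between epochs. GetIndexMapper lifts an element position
  // into the global (repeated) index space, applies the parent mapper, and
  // folds the result back into the input's index range.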
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (i_ < dataset()->count_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
input_impl_.reset();
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
IndexMapperFn GetIndexMapper(IndexMapperFn parent_index_mapper)
const override TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t input_cardinality = dataset()->input_->Cardinality();
int64_t repeat_count = i_;
return [parent_index_mapper, input_cardinality,
repeat_count](size_t element_position) -> absl::StatusOr<size_t> {
if (element_position >= input_cardinality) {
return absl::OutOfRangeError("Finite repeat is out of range");
}
size_t repeated_element_position =
repeat_count * input_cardinality + element_position;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(repeated_element_position));
return shuffled_element_position % input_cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
if (ctx->restored_element_count().has_value()) {
CardinalityOptions options;
options.set_compute_level(
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
const int64_t input_cardinality =
dataset()->input_->Cardinality(std::move(options));
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() % (input_cardinality);
params.index_mapper = GetIndexMapper(ctx->index_mapper());
IteratorContext ctx_with_restored_element_count(params);
if (!input_empty) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(&ctx_with_restored_element_count,
reader, input_impl_));
ctx->MergeCheckpoint(ctx_with_restored_element_count.checkpoint());
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
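  // Repeats indefinitely. If the very first epoch is already empty, there are
  // no split providers, and the input is not a tf.data service dataset, the
  // iterator reports end-of-sequence instead of looping forever.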
class ForeverIterator : public DatasetIterator<Dataset> {
public:
explicit ForeverIterator(const Params& params)
: DatasetIterator<Dataset>(params),
has_data_service_input_(HasDataServiceInput(dataset())),
input_impl_(nullptr),
i_(0),
first_call_(true) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (!input_impl_) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
DCHECK(!*end_of_sequence || out_tensors->empty());
if (first_call_ && *end_of_sequence && ctx->split_providers().empty()) {
if (!has_data_service_input_) {
input_impl_.reset();
return absl::OkStatus();
}
}
first_call_ = false;
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
input_impl_.reset();
first_call_ = true;
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
first_call_ = true;
} else {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
first_call_ = false;
}
return absl::OkStatus();
}
private:
const bool has_data_service_input_;
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t i_ TF_GUARDED_BY(mu_);
bool first_call_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
RepeatDatasetOp::RepeatDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RepeatDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RepeatDataset").Device(DEVICE_CPU),
RepeatDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "repeat_dataset";
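// Repeat op test fixture: the factories below wrap a TensorSliceDataset input
// and feed the repeat count as a scalar input tensor.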
class RepeatDatasetParams : public DatasetParams {
public:
template <typename T>
RepeatDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(RepeatDatasetOp::kInputDataset);
input_names->emplace_back(RepeatDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return RepeatDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class RepeatDatasetOpTest : public DatasetOpsTestBase {};
RepeatDatasetParams FiniteRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
2,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
RepeatDatasetParams EmptyRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
0,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
RepeatDatasetParams ForeverRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 1}, {1, 2})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
-1,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<RepeatDatasetParams>> GetNextTestCases() {
return {{FiniteRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{}},
{
ForeverRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorGetNextOpTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorGetNextOpTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
if (dataset_->Cardinality() == kInfiniteCardinality) {
for (int i = 0; i < 100; ++i) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
for (const auto& tensor : out_tensors) {
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
if (expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
}
EXPECT_FALSE(end_of_sequence);
} else {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
}
INSTANTIATE_TEST_SUITE_P(RepeatDatasetOpTest,
ParameterizedIteratorGetNextOpTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(RepeatDatasetOpTest, DatasetNodeName) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(RepeatDatasetOpTest, DatasetTypeString) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(RepeatDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<RepeatDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<RepeatDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<RepeatDatasetParams>>
DatasetCardinalityTestCases() {
return {{FiniteRepeatDatasetParams(), 4},
{EmptyRepeatDatasetParams(), 0},
{ForeverRepeatDatasetParams(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<RepeatDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<RepeatDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<RepeatDatasetParams>>
IteratorPrefixTestCases() {
return {
{FiniteRepeatDatasetParams(),
name_utils::IteratorPrefix(
"FiniteRepeat", FiniteRepeatDatasetParams().iterator_prefix())},
{EmptyRepeatDatasetParams(),
name_utils::IteratorPrefix(
"EmptyRepeat", EmptyRepeatDatasetParams().iterator_prefix())},
{ForeverRepeatDatasetParams(),
name_utils::IteratorPrefix(
"ForeverRepeat", ForeverRepeatDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<RepeatDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FiniteRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{0, 1, 3},
{}},
{
ForeverRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, Roundtrip) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = dataset_->Cardinality() == 0;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
std::vector<int> breakpoints = GetParam().breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration < breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
cur_iteration++;
if (dataset_->Cardinality() == kInfiniteCardinality &&
expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
if (breakpoint >= dataset_->Cardinality()) {
if (dataset_->Cardinality() == kInfiniteCardinality) {
EXPECT_FALSE(end_of_sequence);
} else {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_SUITE_P(
RepeatDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/repeat_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/repeat_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab1f52e5-859f-4781-a52a-c1677ca42465 | cpp | tensorflow/tensorflow | parallel_map_dataset_op | tensorflow/core/kernels/data/parallel_map_dataset_op.cc | tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc | #include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <cstddef>
#include <deque>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelMapDatasetOp::kDatasetType;
constexpr const char* const ParallelMapDatasetOp::kInputDataset;
constexpr const char* const ParallelMapDatasetOp::kOtherArguments;
constexpr const char* const
ParallelMapDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelMapDatasetOp::kFunc;
constexpr const char* const ParallelMapDatasetOp::kTarguments;
constexpr const char* const ParallelMapDatasetOp::kOutputTypes;
constexpr const char* const ParallelMapDatasetOp::kOutputShapes;
constexpr const char* const
ParallelMapDatasetOp::kUseInterOpParallelism;
constexpr const char* const ParallelMapDatasetOp::kDeterministic;
constexpr const char* const ParallelMapDatasetOp::kSloppy;
constexpr const char* const
ParallelMapDatasetOp::kPreserveCardinality;
namespace {
constexpr char kParallelMapDatasetV1[] = "ParallelMapDataset";
constexpr char kParallelMapDatasetV2[] = "ParallelMapDatasetV2";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
constexpr int kStatsReportingPeriodMillis = 1000;
constexpr int kUnboundedThreadpoolAutotuningFactor = 10;
}
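// ParallelMapDataset applies the captured user function to input elements with
// up to num_parallel_calls invocations in flight (model::kAutotune lets the
// autotuner choose). Cardinality is only forwarded from the input when
// preserve_cardinality is set; otherwise it is reported as unknown.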
class ParallelMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, bool use_unbounded_threadpool,
int op_version)
: Dataset(DatasetContext(ctx), input, num_parallel_calls, output_types,
output_shapes, deterministic, std::move(captured_func),
preserve_cardinality, use_unbounded_threadpool, op_version) {}
Dataset(DatasetContext dataset_context, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, bool use_unbounded_threadpool,
int op_version)
: DatasetBase(std::move(dataset_context)),
input_(input),
num_parallel_calls_(num_parallel_calls),
output_types_(output_types),
output_shapes_(output_shapes),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality),
use_unbounded_threadpool_(use_unbounded_threadpool),
captured_func_(std::move(captured_func)),
op_version_(op_version) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(ParallelMapDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (preserve_cardinality_) {
return input_->Cardinality(options);
} else {
return kUnknownCardinality;
}
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
absl::call_once(instantiated_captured_func_once_, [this, ctx] {
instantiated_captured_func_status_ = captured_func_->Instantiate(
InstantiateCapturedFunctionParams(ctx), &instantiated_captured_func_);
});
TF_RETURN_IF_ERROR(instantiated_captured_func_status_);
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));
return instantiated_captured_func_->RunInstantiated(args, out_tensors);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
if (op_version_ == 1) {
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
} else {
TF_RETURN_IF_ERROR(
b->AddScalar(num_parallel_calls_, &num_parallel_calls));
}
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f_attr;
b->BuildAttrValue(captured_func_->func(), &f_attr);
attrs.emplace_back(kFunc, f_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
AttrValue use_inter_op_parallelism_attr;
b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),
&use_inter_op_parallelism_attr);
attrs.emplace_back(kUseInterOpParallelism, use_inter_op_parallelism_attr);
if (op_version_ == 1) {
AttrValue sloppy_attr;
b->BuildAttrValue(deterministic_.IsNondeterministic(), &sloppy_attr);
attrs.emplace_back(kSloppy, sloppy_attr);
}
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
attrs.emplace_back(kPreserveCardinality, preserve_cardinality_attr);
AttrValue use_unbounded_threadpool_attr;
b->BuildAttrValue(use_unbounded_threadpool_,
&use_unbounded_threadpool_attr);
attrs.emplace_back(kUseUnboundedThreadpool, use_unbounded_threadpool_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node),
std::make_pair(2, num_parallel_calls)},
{std::make_pair(1, other_arguments)},
attrs, output));
return absl::OkStatus();
}
private:
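  // Iterator for ParallelMapDataset. A background runner thread schedules
  // function invocations; GetNextInternal blocks until the next result
  // (in input order when deterministic) is ready.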
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
preserve_cardinality_(params.dataset->preserve_cardinality_),
use_unbounded_threadpool_(params.dataset->use_unbounded_threadpool_),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override {
return deterministic_;
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (use_unbounded_threadpool_) {
unbounded_thread_pool_ = std::make_unique<UnboundedThreadPool>(
ctx->env(), "tf_data_map_unbounded_thread_pool");
}
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
auto params = std::make_unique<IteratorContext::Params>(ctx);
params->cancellation_manager = cancellation_manager_.get();
auto iter_ctx = std::make_unique<IteratorContext>(*params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
iter_ctx.get(), this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx->checkpoint());
TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_));
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(&result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
RecordStop(ctx);
result->notification.WaitForNotification();
RecordStart(ctx);
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
std::shared_ptr<model::Parameter> parameter;
double max_parallelism_value = ctx->runner_threadpool_size();
if (use_unbounded_threadpool_) {
max_parallelism_value *= kUnboundedThreadpoolAutotuningFactor;
}
if (num_parallel_calls_ &&
dataset()->num_parallel_calls_ == model::kAutotune) {
parameter = model::MakeParameter(
"parallelism", num_parallel_calls_, 1,
max_parallelism_value,
GetAutotuneDefaultParallelism(ctx));
} else {
parameter =
model::MakeParameter("parallelism", num_parallel_calls_, 1,
max_parallelism_value);
}
std::optional<int64_t> estimated_element_size =
dataset()->GetEstimatedElementSize();
if (!estimated_element_size) {
VLOG(2) << absl::StrFormat(
"Cannot estimate the size of the output tensor because the "
"output shape of node %s(id:%d) is only partially known.",
args.name, args.id);
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1, {std::move(parameter)},
false, estimated_element_size);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(writer->WriteScalar(element_prefix, kSize,
result.return_values.size()));
for (size_t j = 0; j < result.return_values.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(element_prefix,
absl::StrCat("[", j, "]"),
result.return_values[j]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput,
static_cast<int64_t>(result.end_of_input)));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
DCHECK(invocation_results_.empty());
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
&invocation_results_size));
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>(ctx));
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
size_t num_return_values;
{
int64_t size;
TF_RETURN_IF_ERROR(reader->ReadScalar(element_prefix, kSize, &size));
num_return_values = static_cast<size_t>(size);
if (num_return_values != size) {
return errors::InvalidArgument(
element_prefix, ",", kSize, ": ", size,
" is not a valid value of type size_t.");
}
}
result.return_values.reserve(num_return_values);
for (size_t j = 0; j < num_return_values; j++) {
result.return_values.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), element_prefix,
absl::StrCat("[", j, "]"),
&result.return_values.back()));
}
int64_t end_of_input;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kEndOfInput, &end_of_input));
result.end_of_input = static_cast<bool>(end_of_input);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(
std::make_pair("use_unbounded_threadpool",
use_unbounded_threadpool_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
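    // State of one asynchronous map invocation: its status, return values,
    // end-of-input flag, and a notification signalled when it completes.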
struct InvocationResult {
explicit InvocationResult(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
bool end_of_input = false;
const int64_t uid;
MemoryCheckpoint checkpoint;
};
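    // Starts cancellation and wakes all waiters; if `wait` is true, blocks
    // until every in-flight function call has finished.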
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_map",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
if (ctx->stats_aggregator()) {
stats_thread_ = ctx->StartThread(
"tf_data_parallel_map_stats",
std::bind(&Iterator::StatsThread, this, ctx_copy));
}
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
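    // Pulls the next element from the input iterator and invokes the map
    // function on it asynchronously, recording the outcome in `result`.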
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
result->status = input_impl_->GetNext(ctx.get(), &input_element,
&result->end_of_input);
result->checkpoint.Merge(ctx->checkpoint());
if (result->end_of_input || !result->status.ok()) {
CallCompleted(ctx, result);
return;
}
auto done = [this, ctx, result](Status status) {
if (!status.ok()) {
result->status = AddErrorContext(status);
}
RecordBufferEnqueue(ctx.get(), result->return_values);
CallCompleted(ctx, result);
};
if (use_unbounded_threadpool_) {
auto runner_fn = [this](std::function<void()> fn) {
this->unbounded_thread_pool_->Schedule(fn);
};
instantiated_captured_func_->RunAsync(
runner_fn, ctx->cancellation_manager(), ctx->collective_executor(),
std::move(input_element), &result->return_values, done,
model_node());
} else if (dataset()->captured_func_->use_inter_op_parallelism()) {
instantiated_captured_func_->RunAsync(
ctx.get(), std::move(input_element), &result->return_values,
std::move(done), model_node());
} else {
auto fn = std::bind(
[this, ctx, result](std::vector<Tensor> input_element) {
return instantiated_captured_func_->Run(
ctx.get(), std::move(input_element), &result->return_values,
model_node());
},
std::move(input_element));
(*ctx->runner())(
[this, ctx, fn = std::move(fn), done = std::move(done)]() {
Status s;
if (IsRecording(ctx.get())) {
s = fn();
} else {
RecordStart(ctx.get());
s = fn();
RecordStop(ctx.get());
}
done(s);
});
}
}
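    // Turns a completed invocation into iterator output, translating
    // end-of-input and OutOfRange errors into end_of_sequence as appropriate.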
Status ProcessResult(IteratorContext* ctx,
const std::shared_ptr<InvocationResult>& result,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
ctx->MergeCheckpoint(&result->checkpoint);
if (!result->end_of_input && result->status.ok()) {
*out_tensors = std::move(result->return_values);
RecordBufferDequeue(ctx, *out_tensors);
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(result->status)) {
if (preserve_cardinality_) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
result->status.message());
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
*end_of_sequence = result->end_of_input;
return result->status;
}
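    // Background thread that keeps up to `num_parallel_calls` invocations in
    // flight and buffers their results for the consumer.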
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
std::vector<std::shared_ptr<InvocationResult>> new_calls;
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
invocation_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
invocation_results_.push_back(
std::make_shared<InvocationResult>(ctx.get()));
new_calls.push_back(invocation_results_.back());
num_calls_++;
}
cond_var_->notify_all();
}
for (const auto& call : new_calls) {
CallFunction(ctx, call);
}
new_calls.clear();
}
}
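    // Returns true if the consumer must keep waiting. In deterministic mode
    // results are handed out in input order; otherwise a completed result may
    // be returned as soon as it is ready.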
bool ShouldWait(std::shared_ptr<InvocationResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
if (!deterministic_) {
for (auto it = invocation_results_.begin();
it != invocation_results_.end(); ++it) {
if ((*it)->notification.HasBeenNotified() &&
(it == invocation_results_.begin() || !(*it)->end_of_input)) {
std::swap(*result, *it);
invocation_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else if (!invocation_results_.empty()) {
std::swap(*result, invocation_results_.front());
invocation_results_.pop_front();
cond_var_->notify_all();
return false;
}
return true;
}
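    // Periodically reports thread utilization (in-flight calls divided by the
    // configured parallelism) to the stats aggregator.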
void StatsThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
for (int64_t step = 0;; ++step) {
int num_calls;
int num_parallel_calls;
{
mutex_lock l(*mu_);
if (step != 0 && !cancelled_) {
cond_var_->wait_for(
l, std::chrono::milliseconds(kStatsReportingPeriodMillis));
}
if (cancelled_) {
return;
}
num_calls = num_calls_;
num_parallel_calls = num_parallel_calls_->value;
}
if (num_parallel_calls == 0) {
num_parallel_calls = 1;
}
ctx->stats_aggregator()->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls) /
static_cast<float>(num_parallel_calls),
step);
}
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const std::string& prefix, const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix, absl::StrCat("_", kErrorCode),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix,
absl::StrCat("_", kErrorMessage),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader,
const std::string& prefix, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix, absl::StrCat("_", kErrorCode), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix, absl::StrCat("_", kErrorMessage), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
const bool preserve_cardinality_;
const bool use_unbounded_threadpool_;
const bool autotune_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<InvocationResult>> invocation_results_
TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
std::unique_ptr<Thread> stats_thread_ TF_GUARDED_BY(*mu_);
std::unique_ptr<UnboundedThreadPool> unbounded_thread_pool_;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
};
const DatasetBase* const input_;
const int64_t num_parallel_calls_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const DeterminismPolicy deterministic_;
const bool preserve_cardinality_;
const bool use_unbounded_threadpool_;
const std::unique_ptr<CapturedFunction> captured_func_;
const int op_version_;
mutable absl::once_flag instantiated_captured_func_once_;
mutable absl::Status instantiated_captured_func_status_;
mutable std::unique_ptr<InstantiatedCapturedFunction>
instantiated_captured_func_;
absl::Status random_indexing_compatible_;
};
ParallelMapDatasetOp::ParallelMapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), op_version_(ctx->HasAttr(kSloppy) ? 1 : 2) {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseInterOpParallelism,
¶ms.use_inter_op_parallelism));
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, kFunc, params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
if (op_version_ == 1) {
bool sloppy;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSloppy, &sloppy));
if (sloppy) {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kNondeterministic);
} else {
deterministic_ = DeterminismPolicy(DeterminismPolicy::Type::kDefault);
}
use_unbounded_threadpool_ = false;
}
if (op_version_ == 2) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kUseUnboundedThreadpool, &use_unbounded_threadpool_));
}
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_));
}
void ParallelMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t num_parallel_calls;
if (op_version_ == 1) {
int32_t parallel_calls;
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, ¶llel_calls));
num_parallel_calls = parallel_calls;
}
if (op_version_ == 2) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
}
OP_REQUIRES(
ctx, num_parallel_calls > 0 || num_parallel_calls == model::kAutotune,
errors::InvalidArgument("num_parallel_calls must be greater than zero."));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
if (num_parallel_calls == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, num_parallel_calls, output_types_,
output_shapes_, deterministic_,
std::move(captured_func), preserve_cardinality_,
use_unbounded_threadpool_, op_version_);
}
std::unique_ptr<DatasetBase> MakeDataServiceUncompressDataset(
DatasetBase* input, std::unique_ptr<CapturedFunction> captured_function,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
DatasetContext::Params param;
param.type_string = kParallelMapDatasetV2;
param.node_name = kParallelMapDatasetV2;
return std::make_unique<ParallelMapDatasetOp::Dataset>(
DatasetContext(std::move(param)), input,
      /*num_parallel_calls=*/model::kAutotune, output_types, output_shapes,
      DeterminismPolicy(DeterminismPolicy::Type::kDefault),
      std::move(captured_function),
      /*preserve_cardinality=*/true,
      /*use_unbounded_threadpool=*/false, /*op_version=*/2);
}
namespace {
REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV1).Device(DEVICE_CPU),
ParallelMapDatasetOp);
REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV2).Device(DEVICE_CPU),
ParallelMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV1);
REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV2);
}
}
} | #include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
constexpr int kOpVersion = 2;
class ParallelMapDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelMapDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int num_parallel_calls, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
bool use_inter_op_parallelism, const std::string& deterministic,
bool preserve_cardinality, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
num_parallel_calls_(num_parallel_calls),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelMapDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelMapDatasetOp::kNumParallelCalls);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"deterministic", deterministic_},
{"preserve_cardinality", preserve_cardinality_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int num_parallel_calls_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
std::string deterministic_;
bool preserve_cardinality_;
};
class ParallelMapDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
const DataType& dtype) {
return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}
ParallelMapDatasetParams ParallelMapDatasetParams1() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
1,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams2() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams3() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
3,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams4() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams5() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams6() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams7() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams8() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams9() {
return ParallelMapDatasetParams(
BatchDatasetParams(RangeDatasetParams(0, 4, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
"batch_dataset"),
{},
1,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
-4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
std::vector<GetNextTestCase<ParallelMapDatasetParams>> GetNextTestCases() {
return {{ParallelMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{
ParallelMapDatasetParams9(),
{CreateTensor<int64_t>(TensorShape{3}, {0, 2, 4}),
CreateTensor<int64_t>(TensorShape{1}, {6})},
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
GetNextTestCases())
TEST_F(ParallelMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelMapDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeHasValue) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
auto element_size = dataset_->GetEstimatedElementSize();
ASSERT_TRUE(element_size.has_value());
EXPECT_GT(element_size.value(), 0);
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeNoValue) {
auto dataset_params = ParallelMapDatasetParams9();
TF_ASSERT_OK(Initialize(dataset_params));
EXPECT_FALSE(dataset_->GetEstimatedElementSize().has_value());
}
std::vector<CardinalityTestCase<ParallelMapDatasetParams>>
CardinalityTestCases() {
return {{ParallelMapDatasetParams1(),
kUnknownCardinality},
{ParallelMapDatasetParams2(),
4},
{ParallelMapDatasetParams3(),
kUnknownCardinality},
{ParallelMapDatasetParams4(),
kUnknownCardinality},
{ParallelMapDatasetParams5(),
4},
{ParallelMapDatasetParams6(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
CardinalityTestCases())
TEST_F(ParallelMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelMapDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelMapDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelMapDatasetOpTest,
ParallelMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelMapDatasetOpTest, InvalidNumParallelCalls) {
auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa2137ae-9b6f-4c3e-b411-fc2113fca95c | cpp | tensorflow/tensorflow | save_dataset_op | tensorflow/core/kernels/data/experimental/save_dataset_op.cc | tensorflow/core/kernels/data/experimental/save_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const SaveDatasetOp::kCompression;
constexpr const char* const SaveDatasetOp::kPath;
constexpr const char* const SaveDatasetOp::kShardFunc;
constexpr const char* const SaveDatasetOp::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetOp::kUseShardFunc;
constexpr const int SaveDatasetOp::kFileFormatVersion;
constexpr const char* const SaveDatasetV2Op::kInputDataset;
constexpr const char* const SaveDatasetV2Op::kPath;
constexpr const char* const SaveDatasetV2Op::kCompression;
constexpr const char* const SaveDatasetV2Op::kDatasetType;
constexpr const char* const SaveDatasetV2Op::kOutputTypes;
constexpr const char* const SaveDatasetV2Op::kOutputShapes;
constexpr const char* const SaveDatasetV2Op::kShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetV2Op::kUseShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncTarguments;
constexpr const int SaveDatasetV2Op::kFileFormatVersion;
SaveDatasetOp::SaveDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_save_dataset") {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
}
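// Writes the entire input dataset to `path`: creates a run directory, streams
// every element into per-shard snapshot files, then finalizes the metadata.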
Status SaveDatasetOp::DoCompute(OpKernelContext* ctx) {
metrics::RecordTFDataFetchOp("SaveDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
TF_RETURN_IF_ERROR(ParseScalarArgument(ctx, kPath, &path));
auto run_id = random::New64();
auto run_dir = snapshot_util::RunDirectory(path, run_id);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir));
TF_RETURN_IF_ERROR(
WriteMetadataFile(ctx->env(), path, run_id, dataset->output_dtypes(),
                        /*num_elements=*/0, /*finalized=*/false));
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, kShardFuncOtherArgs, &captured_func));
uint64 num_elements = 0;
TF_RETURN_IF_ERROR(WriteData(ctx, dataset, std::move(captured_func), run_dir,
&num_elements));
TF_RETURN_IF_ERROR(WriteMetadataFile(ctx->env(), path, run_id,
dataset->output_dtypes(), num_elements,
true));
return absl::OkStatus();
}
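// Iterates over `dataset`, routing each element to the shard chosen by
// GetShardIndex and writing it through a per-shard AsyncWriter.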
Status SaveDatasetOp::WriteData(OpKernelContext* ctx, DatasetBase* dataset,
std::unique_ptr<CapturedFunction> captured_func,
const std::string& run_dir,
uint64* num_elements) {
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
DatasetBase* finalized_dataset;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
&iter_ctx, nullptr, "Save", &iterator));
mutex mu;
Status status;
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers;
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> element;
bool end_of_input;
TF_RETURN_IF_ERROR(iterator->GetNext(&iter_ctx, &element, &end_of_input));
if (end_of_input) {
break;
}
(*num_elements)++;
int64_t shard_index = -1;
TF_RETURN_IF_ERROR(GetShardIndex(
&iter_ctx, instantiated_captured_func.get(), element, &shard_index));
if (writers.count(shard_index) == 0) {
const auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir, shard_index);
auto writer_thread = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
          /*checkpoint_id=*/0, compression_, kFileFormatVersion,
finalized_dataset->output_dtypes(), [&mu, &status](Status s) {
mutex_lock l(mu);
status.Update(s);
});
writers.insert({shard_index, std::move(writer_thread)});
}
writers[shard_index]->Write(element);
}
for (auto& writer : writers) {
writer.second->SignalEOF();
}
writers.clear();
return status;
}
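// Chooses the target shard for an element: round-robin over the CPU budget by
// default, or the scalar int64 returned by the user-supplied `shard_func`.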
Status SaveDatasetOp::GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
int64_t* shard_index) {
if (!use_shard_func_) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
ctx, element, &output_tensors, nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument("`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
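// Writes the snapshot metadata record (run id, dtypes, element count and
// finalized flag) for this save run.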
Status SaveDatasetOp::WriteMetadataFile(Env* env, const std::string& path,
uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
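// Dataset for SaveDatasetV2: elements pass through unchanged and are written
// to disk as a side effect of iteration.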
class SaveDatasetV2Op::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, const tstring& path,
const std::string& compression,
std::unique_ptr<CapturedFunction> shard_func, bool use_shard_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
path_(path),
compression_(compression),
shard_func_(std::move(shard_func)),
use_shard_func_(use_shard_func) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* path_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(path_, &path_node));
std::vector<Node*> shard_func_other_args;
DataTypeVector shard_func_other_args_types;
TF_RETURN_IF_ERROR(shard_func_->AddToGraph(ctx, b, &shard_func_other_args,
&shard_func_other_args_types));
AttrValue compression_attr;
b->BuildAttrValue(compression_, &compression_attr);
AttrValue shard_func_attr;
b->BuildAttrValue(shard_func_->func(), &shard_func_attr);
AttrValue use_shard_func_attr;
b->BuildAttrValue(use_shard_func_, &use_shard_func_attr);
AttrValue shard_func_arguments_types_attr;
b->BuildAttrValue(shard_func_other_args_types,
&shard_func_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node), std::make_pair(1, path_node)},
{std::make_pair(2, shard_func_other_args)},
{std::make_pair(kCompression, compression_attr),
std::make_pair(kShardFunc, shard_func_attr),
std::make_pair(kUseShardFunc, use_shard_func_attr),
std::make_pair(kShardFuncTarguments, shard_func_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
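  // Iterator that forwards input elements while asynchronously writing each
  // one to its shard file; saving a checkpoint closes the current writers and
  // advances the checkpoint id.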
class Iterator : public DatasetIterator<Dataset> {
public:
static constexpr const char* const kIteratorName = "Writer";
static constexpr const char* const kRunId = "run_id";
static constexpr const char* const kCurrentCheckpointId =
"current_checkpoint_id";
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
writers_closed_(false),
run_id_(0),
current_checkpoint_id_(0) {}
~Iterator() override {
mutex_lock l(mu_);
SignalEOF(true);
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
dataset()->shard_func_->Instantiate(ctx, &instantiated_shard_func_));
if (!ctx->is_restoring()) {
run_id_ = random::New64();
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
          /*num_elements=*/0, /*finalized=*/false));
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
snapshot_util::AsyncWriter* current_writer;
{
std::vector<Tensor> output_tensors;
mutex_lock l(mu_);
{
mutex_lock wsl(writer_status_mu_);
if (!writer_status_.ok() || writers_closed_) {
*end_of_sequence = true;
return writer_status_;
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
SignalEOF(true);
{
mutex_lock wsl(writer_status_mu_);
TF_RETURN_IF_ERROR(writer_status_);
}
return WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
              /*num_elements=*/dataset()->Cardinality(), /*finalized=*/true);
}
(num_elements_)++;
int64_t shard_index = 0;
TF_RETURN_IF_ERROR(
GetShardIndex(ctx, instantiated_shard_func_.get(), *out_tensors,
dataset()->use_shard_func_, &shard_index));
if (writers_.count(shard_index) == 0) {
auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir_, shard_index);
auto writer = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
current_checkpoint_id_, dataset()->compression_,
kFileFormatVersion, dataset()->output_dtypes(), [this](Status s) {
if (!s.ok()) {
mutex_lock l(writer_status_mu_);
writer_status_ = s;
}
});
writers_.insert({shard_index, std::move(writer)});
}
current_writer = writers_[shard_index].get();
}
current_writer->Write(*out_tensors);
return absl::OkStatus();
}
protected:
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kRunId),
static_cast<int64_t>(run_id_)));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kCurrentCheckpointId),
static_cast<int64_t>(current_checkpoint_id_)));
SignalEOF(false);
writers_.clear();
current_checkpoint_id_++;
return SaveInput(ctx, writer, input_impl_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t run_id_signed;
int64_t current_checkpoint_id;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kRunId), &run_id_signed));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointId),
¤t_checkpoint_id));
run_id_ = static_cast<uint64>(run_id_signed);
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
current_checkpoint_id_ = static_cast<uint64>(current_checkpoint_id);
if (ctx->is_restoring()) {
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
            /*num_elements=*/0, /*finalized=*/false));
}
return RestoreInput(ctx, reader, input_impl_);
}
private:
Status GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
bool use_shard_func, int64_t* shard_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!use_shard_func) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
ctx, element, &output_tensors, nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument(
"`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
Status WriteMetadataFile(Env* env, const std::string& path, uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
void SignalEOF(bool mark_closed) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!writers_closed_) {
for (auto& writer : writers_) {
writer.second->SignalEOF();
}
writers_.clear();
writers_closed_ = mark_closed;
}
}
mutex mu_;
mutex writer_status_mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    int64_t num_elements_ = 0;  // Number of elements produced so far.
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers_ TF_GUARDED_BY(mu_);
Status writer_status_ TF_GUARDED_BY(writer_status_mu_);
bool writers_closed_ TF_GUARDED_BY(mu_);
uint64 run_id_ TF_GUARDED_BY(mu_);
tstring run_dir_ TF_GUARDED_BY(mu_);
uint64 current_checkpoint_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_shard_func_
TF_GUARDED_BY(mu_);
};
const DatasetBase* input_;
const tstring path_;
const std::string compression_;
const std::unique_ptr<CapturedFunction> shard_func_;
const bool use_shard_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const std::shared_ptr<FunctionMetadata> func_metadata_;
const std::string writer_prefix_;
};
SaveDatasetV2Op::SaveDatasetV2Op(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
}
void SaveDatasetV2Op::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
DatasetBase* dataset;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPath, &path));
std::unique_ptr<CapturedFunction> shard_func;
OP_REQUIRES_OK(
ctx, CapturedFunction::Create(ctx, func_metadata_, kShardFuncOtherArgs,
&shard_func));
*output = new Dataset(ctx, dataset, path, compression_, std::move(shard_func),
use_shard_func_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SaveDataset").Device(DEVICE_CPU), SaveDatasetOp);
REGISTER_KERNEL_BUILDER(Name("SaveDatasetV2").Device(DEVICE_CPU),
SaveDatasetV2Op);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char kSaveDatasetV2NodeName[] = "save_dataset_v2";
class SaveDatasetV2Params : public DatasetParams {
public:
template <typename T>
SaveDatasetV2Params(T input_dataset_params, const tstring& path,
const std::string& compression,
FunctionDefHelper::AttrValueWrapper shard_func,
std::vector<FunctionDef> func_lib, bool use_shard_func,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name, DataTypeVector type_arguments)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
path_(path),
compression_(compression),
shard_func_(shard_func),
func_lib_(std::move(func_lib)),
use_shard_func_(use_shard_func),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors;
input_tensors.emplace_back(CreateTensor<tstring>(TensorShape({}), {path_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SaveDatasetV2Op::kInputDataset);
input_names->emplace_back(SaveDatasetV2Op::kPath);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(SaveDatasetV2Op::kCompression, compression_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFunc, shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kUseShardFunc, use_shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFuncTarguments,
type_arguments_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputShapes, output_shapes_);
return absl::OkStatus();
}
string path() const { return path_; }
string dataset_type() const override { return SaveDatasetV2Op::kDatasetType; }
string op_name() const override { return "SaveDatasetV2"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::string path_;
std::string compression_;
FunctionDefHelper::AttrValueWrapper shard_func_;
std::vector<FunctionDef> func_lib_;
bool use_shard_func_;
DataTypeVector type_arguments_;
};
class SaveDatasetV2OpTest : public DatasetOpsTestBase {
public:
Status Initialize(const DatasetParams& dataset_params) {
TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
auto params = static_cast<const SaveDatasetV2Params&>(dataset_params);
save_filename_ = params.path();
return absl::OkStatus();
}
protected:
std::string save_filename_;
};
SaveDatasetV2Params SaveDatasetV2Params1() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 10, 2),
io::JoinPath(testing::TmpDir(), "save_data"),
"",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
false,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
SaveDatasetV2Params SaveDatasetV2Params2() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 5, 1),
io::JoinPath(testing::TmpDir(), "save_data"),
"GZIP",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
true,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
std::vector<GetNextTestCase<SaveDatasetV2Params>> GetNextTestCases() {
return {{
SaveDatasetV2Params1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedGetNextTest : public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
GetNextTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(SaveDatasetV2OpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(SaveDatasetV2OpTest, DatasetNodeName) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SaveDatasetV2OpTest, DatasetTypeString) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString("SaveDatasetV2"));
}
TEST_F(SaveDatasetV2OpTest, DatasetOutputDtypes) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
std::vector<DatasetOutputDtypesTestCase<SaveDatasetV2Params>>
DatasetOutputDtypesTestCases() {
return {{SaveDatasetV2Params1(),
{DT_INT64}},
{SaveDatasetV2Params2(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SaveDatasetV2Params>>
DatasetOutputShapesTestCases() {
return {{SaveDatasetV2Params1(),
{PartialTensorShape({})}},
{SaveDatasetV2Params2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SaveDatasetV2Params>> CardinalityTestCases() {
return {{SaveDatasetV2Params1(),
5},
{SaveDatasetV2Params2(),
5}};
}
DATASET_CARDINALITY_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
CardinalityTestCases())
TEST_F(SaveDatasetV2OpTest, IteratorPrefix) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
SaveDatasetV2Op::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>>
IteratorSaveAndRestoreTestCases() {
return {{SaveDatasetV2Params1(),
{0, 2, 4, 6, 8},
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
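  // At each breakpoint the iterator state is serialized and immediately
  // restored, then the restored iterator is advanced up to that breakpoint;
  // the accumulated outputs must still match the expected sequence.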
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(SaveDatasetV2OpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/save_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/save_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd4789df-22b1-45b7-8784-0f2d1d2727ba | cpp | tensorflow/tensorflow | assert_next_dataset_op | tensorflow/core/kernels/data/experimental/assert_next_dataset_op.cc | tensorflow/core/kernels/data/experimental/assert_next_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include <map>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const AssertNextDatasetOp::kInputDataset;
constexpr const char* const AssertNextDatasetOp::kDatasetType;
constexpr const char* const AssertNextDatasetOp::kTransformations;
constexpr const char* const AssertNextDatasetOp::kOutputTypes;
constexpr const char* const AssertNextDatasetOp::kOutputShapes;
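// Pass-through dataset: elements come straight from the wrapped input. The
// check against the asserted transformations happens in Iterator::Initialize.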
class AssertNextDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const std::vector<tstring>& transformations,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
transformations_(transformations),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* transformations_node = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, transformations_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
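    // Splits the iterator prefix (e.g. "Iterator::Range::Take::AssertNext") on
    // ':' and verifies that the names immediately preceding this dataset match
    // the asserted transformations, closest first; a mismatch or a too-short
    // prefix yields InvalidArgument.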
Status Initialize(IteratorContext* ctx) override {
std::vector<string> tokens =
absl::StrSplit(prefix(), ':', absl::SkipEmpty());
if (dataset()->transformations_.size() > tokens.size() - 2) {
return errors::InvalidArgument(
"Asserted next ", dataset()->transformations_.size(),
" transformations but encountered only ", tokens.size() - 2, ".");
}
int n = tokens.size();
for (size_t i = 0; i < dataset()->transformations_.size(); ++i) {
if (!MatchesAnyVersion(dataset()->transformations_[i],
tokens[n - 2 - i])) {
return errors::InvalidArgument("Asserted transformation matching ",
dataset()->transformations_[i],
" at offset ", i, " but encountered ",
tokens[n - 2 - i],
" transformation instead.");
}
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
};
const DatasetBase* input_;
const std::vector<tstring> transformations_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
};
AssertNextDatasetOp::AssertNextDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void AssertNextDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::vector<tstring> transformations;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
&transformations));
*output =
new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU),
AssertNextDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalAssertNextDataset").Device(DEVICE_CPU),
AssertNextDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_next_dataset";
class AssertNextDatasetParams : public DatasetParams {
public:
template <typename T>
AssertNextDatasetParams(T input_dataset_params,
const std::vector<tstring>& transformations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
transformations_(transformations) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
int num_transformations = transformations_.size();
return {CreateTensor<tstring>(TensorShape({num_transformations}),
transformations_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() + 1);
input_names->emplace_back(AssertNextDatasetOp::kInputDataset);
input_names->emplace_back(AssertNextDatasetOp::kTransformations);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{AssertNextDatasetOp::kOutputShapes, output_shapes_},
{AssertNextDatasetOp::kOutputTypes, output_dtypes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return AssertNextDatasetOp::kDatasetType;
}
private:
std::vector<tstring> transformations_;
};
class AssertNextDatasetOpTest : public DatasetOpsTestBase {};
AssertNextDatasetParams AssertNextDatasetParams1() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams AssertNextDatasetParams2() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams InvalidAssertNextDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(std::move(take_dataset_params),
{"Whoops"},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams ShortAssertNextDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType, "Whoops"},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AssertNextDatasetParams>> GetNextTestCases() {
return {{AssertNextDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertNextDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertNextDatasetOpTest, AssertNextDatasetParams,
GetNextTestCases())
TEST_F(AssertNextDatasetOpTest, DatasetNodeName) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertNextDatasetOpTest, DatasetTypeString) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(AssertNextDatasetOp::kDatasetType)));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, Cardinality) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, IteratorPrefix) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
AssertNextDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<AssertNextDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{AssertNextDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertNextDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertNextDatasetOpTest,
AssertNextDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(AssertNextDatasetOpTest, InvalidArguments) {
auto dataset_params = InvalidAssertNextDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(AssertNextDatasetOpTest, ShortAssertNext) {
auto dataset_params = ShortAssertNextDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_next_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_next_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bf7de3c-09a8-4dc1-a844-4bd09c44fcb0 | cpp | tensorflow/tensorflow | list_dataset_op | tensorflow/core/kernels/data/experimental/list_dataset_op.cc | tensorflow/core/kernels/data/experimental/list_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const ListDatasetOp::kDatasetType;
constexpr const char* const ListDatasetOp::kTensors;
constexpr const char* const ListDatasetOp::kTinputTypes;
constexpr const char* const ListDatasetOp::kOutputTypes;
constexpr const char* const ListDatasetOp::kOutputShapes;
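// In-memory dataset over a flattened tensor list: element `index` is made up
// of tensors_[num_components * index + i] for i in [0, num_components).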
class ListDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
const DataTypeVector& input_types, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
int num_components)
: DatasetBase(DatasetContext(ctx)),
tensors_(std::move(tensors)),
num_elements_(tensors_.size() / num_components),
num_components_(num_components),
input_types_(input_types),
output_types_(output_types),
output_shapes_(output_shapes) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<IndexSplitProvider>(num_elements_));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return num_elements_;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
absl::Status Get(OpKernelContext* ctx, int64_t index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
absl::Status Get(AnyContext ctx, int64_t index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->clear();
out_tensors->reserve(num_components_);
for (int i = 0; i < num_components_; ++i) {
out_tensors->push_back(tensors_[i + num_components_ * index]);
}
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> tensors;
tensors.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
tensors.emplace_back(node);
}
AttrValue input_types;
b->BuildAttrValue(input_types_, &input_types);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, tensors}},
{{kTinputTypes, input_types}}, output));
return absl::OkStatus();
}
private:
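  // Iteration is driven by a split provider handing out element indices, so
  // the same code path supports plain iteration, sharding, and global
  // shuffling.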
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty()) {
split_provider_ =
std::make_shared<IndexSplitProvider>(dataset()->num_elements_);
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
int64_t index = split.scalar<int64_t>()();
out_tensors->reserve(dataset()->num_components_);
for (size_t i = 0; i < dataset()->num_components_; ++i) {
out_tensors->push_back(
dataset()->tensors_[i + dataset()->num_components_ * index]);
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(split_provider_->Save(
[this](const std::string& key) { return full_name(key); }, writer));
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
return split_provider_->Restore(
[this](const std::string& key) { return full_name(key); }, reader);
}
private:
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
int64 num_elements_;
size_t num_components_;
DataTypeVector input_types_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
ListDatasetOp::ListDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kTinputTypes, &input_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void ListDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kTensors, &inputs));
std::vector<Tensor> tensors(inputs.begin(), inputs.end());
*output = new Dataset(ctx, std::move(tensors), input_types_, output_types_,
output_shapes_, output_shapes_.size());
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ListDataset").Device(DEVICE_CPU), ListDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_list_dataset";
class ListDatasetOpTest : public DatasetOpsTestBase {};
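// Flattens a vector of elements (each itself a vector of tensors) into a
// single tensor list; output dtypes and shapes are inferred from the first
// element.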
class ListDatasetParams : public DatasetParams {
public:
ListDatasetParams(std::vector<std::vector<Tensor>> elements, string node_name)
: DatasetParams(ListOutputTypes(elements), ListOutputShapes(elements),
std::move(node_name)) {
input_types_.reserve(elements.size() * elements.front().size());
tensors_.reserve(elements.size() * elements.front().size());
for (const auto& element : elements) {
for (const auto& tensor : element) {
input_types_.push_back(tensor.dtype());
tensors_.emplace_back(std::move(tensor));
}
}
}
std::vector<Tensor> GetInputTensors() const override { return tensors_; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(tensors_.size());
for (int i = 0; i < tensors_.size(); ++i) {
input_names->emplace_back(absl::StrCat("tensors_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"Tinput_types", input_types_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return "List"; }
int64_t num_elements() const {
return tensors_.size() / num_tensors_per_element();
}
size_t num_tensors_per_element() const { return output_shapes_.size(); }
private:
DataTypeVector ListInputTypes(
const std::vector<std::vector<Tensor>>& input_elements) {
DataTypeVector input_types;
for (const auto& element : input_elements) {
for (const auto& tensor : element) {
input_types.emplace_back(tensor.dtype());
}
}
return input_types;
}
DataTypeVector ListOutputTypes(
const std::vector<std::vector<Tensor>>& input_elements) {
DataTypeVector output_types;
for (const auto& tensor : input_elements.front()) {
output_types.emplace_back(tensor.dtype());
}
return output_types;
}
std::vector<PartialTensorShape> ListOutputShapes(
const std::vector<std::vector<Tensor>>& input_elements) {
std::vector<PartialTensorShape> output_shapes;
for (const auto& tensor : input_elements.front()) {
absl::InlinedVector<int64_t, 4UL> partial_dim_sizes;
partial_dim_sizes.reserve(tensor.dims());
for (int i = 0; i < tensor.dims(); ++i) {
partial_dim_sizes.push_back(tensor.dim_size(i));
}
output_shapes.emplace_back(std::move(partial_dim_sizes));
}
return output_shapes;
}
public:
std::vector<Tensor> tensors_;
DataTypeVector input_types_;
};
ListDatasetParams PlainListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"})},
{CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}};
return {std::move(elements), kNodeName};
}
ListDatasetParams NestedListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3})},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}};
return {std::move(elements), kNodeName};
}
std::vector<GetNextTestCase<ListDatasetParams>> GetNextTestCases() {
return {
{PlainListDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<GetNextTestCase<ListDatasetParams>> {
};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
size_t num_tensors_per_element =
test_case.dataset_params.num_tensors_per_element();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_element = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_element * cur_element,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]));
}
}
cur_element++;
}
}
INSTANTIATE_TEST_SUITE_P(ListDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(ListDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ListDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ListDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ListDatasetParams>>
DatasetOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<ListDatasetParams>>
DatasetOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ListDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainListDatasetParams(), 2},
{NestedListDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ListDatasetParams>>
IteratorOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<ListDatasetParams>>
IteratorOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ListDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ListDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ListDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainListDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ListDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params = static_cast<ListDatasetParams&>(test_case.dataset_params);
int64_t num_elements = params.num_elements();
size_t num_tensors_per_element = params.num_tensors_per_element();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_elements) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
ListDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
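// Sharding the 7-element list with num_shards=3 and shard_index=1 keeps
// indices 1 and 4, i.e. the values {2} and {7}.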
TEST_F(ListDatasetOpTest, SplitProvider) {
auto params =
ListDatasetParams({{CreateTensor<int64_t>(TensorShape({}), {6})},
{CreateTensor<int64_t>(TensorShape({}), {2})},
{CreateTensor<int64_t>(TensorShape({}), {3})},
{CreateTensor<int64_t>(TensorShape({}), {8})},
{CreateTensor<int64_t>(TensorShape({}), {7})},
{CreateTensor<int64_t>(TensorShape({}), {0})},
{CreateTensor<int64_t>(TensorShape({}), {10})}},
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/list_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/list_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33e01fd9-60ed-472e-a66b-8e1d12d6260e | cpp | tensorflow/tensorflow | directed_interleave_dataset_op | tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.cc | tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const
DirectedInterleaveDatasetOp::kDatasetType;
constexpr const char* const
DirectedInterleaveDatasetOp::kSelectorInputDataset;
constexpr const char* const
DirectedInterleaveDatasetOp::kDataInputDatasets;
constexpr const char* const
DirectedInterleaveDatasetOp::kStopOnEmptyDataset;
constexpr const char* const
DirectedInterleaveDatasetOp::kOutputTypes;
constexpr const char* const
DirectedInterleaveDatasetOp::kOutputShapes;
constexpr const char* const
DirectedInterleaveDatasetOp::kNumInputDatasets;
constexpr char kCycleLength[] = "cycle_length";
constexpr char kDataInputImplEmpty[] = "data_input_impl_empty";
constexpr char kSelectorInputImplEmpty[] = "selector_input_impl_empty";
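// Each output element is pulled from one of the data input datasets; which one
// is chosen by the next scalar int64 produced by the selector input dataset.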
class DirectedInterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* selector_input,
std::vector<DatasetBase*> data_inputs, bool stop_on_empty_dataset)
: DatasetBase(DatasetContext(ctx)),
selector_input_(selector_input),
data_inputs_(std::move(data_inputs)),
stop_on_empty_dataset_(stop_on_empty_dataset) {
selector_input_->Ref();
output_shapes_ = data_inputs_[0]->output_shapes();
data_inputs_[0]->Ref();
for (size_t i = 1; i < data_inputs_.size(); ++i) {
const DatasetBase* data_input = data_inputs_[i];
data_input->Ref();
for (size_t j = 0; j < output_shapes_.size(); ++j) {
output_shapes_[j] = MostSpecificCompatibleShape(
output_shapes_[j], data_input->output_shapes()[j]);
}
}
}
~Dataset() override {
selector_input_->Unref();
for (DatasetBase* data_input : data_inputs_) {
data_input->Unref();
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return data_inputs_[0]->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
for (const auto& input : data_inputs_) {
int64_t n = input->Cardinality(options);
if (n == kInfiniteCardinality) {
return n;
}
}
return kUnknownCardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(selector_input_);
for (const auto& data_input : data_inputs_) {
inputs->push_back(data_input);
}
return absl::OkStatus();
}
Status CheckExternalState() const override {
for (const auto& input : data_inputs_) {
TF_RETURN_IF_ERROR(input->CheckExternalState());
}
return selector_input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* selector_input_node;
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, selector_input_, &selector_input_node));
std::vector<Node*> data_input_nodes(data_inputs_.size());
for (size_t i = 0; i < data_inputs_.size(); ++i) {
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, data_inputs_[i], &data_input_nodes[i]));
}
AttrValue stop_on_empty_dataset_attr;
b->BuildAttrValue(stop_on_empty_dataset_, &stop_on_empty_dataset_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{{0, selector_input_node}},
{{1, data_input_nodes}},
{std::make_pair(kStopOnEmptyDataset, stop_on_empty_dataset_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
num_active_inputs_(params.dataset->data_inputs_.size()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
TF_RETURN_IF_ERROR(dataset()->selector_input_->MakeIterator(
&input_contexts_[0], this, prefix(), &selector_input_impl_));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
data_input_impls_.resize(dataset()->data_inputs_.size());
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
const DatasetBase* data_input = dataset()->data_inputs_[i];
TF_RETURN_IF_ERROR(data_input->MakeIterator(
&input_contexts_[i + 1], this,
strings::StrCat(prefix(), "[", i, "]"), &data_input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i + 1].checkpoint());
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!selector_input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
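      // Keep consuming selector values until a live input yields an element.
      // An exhausted input either ends the sequence (stop_on_empty_dataset_)
      // or is retired so that later selector values pointing at it are
      // skipped.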
while (true) {
std::vector<Tensor> selector_result;
*end_of_sequence = false;
TF_RETURN_IF_ERROR(selector_input_impl_->GetNext(
&input_contexts_[0], &selector_result, end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
if (*end_of_sequence) {
ResetInputs();
return absl::OkStatus();
}
int64_t selected_input = selector_result[0].scalar<int64_t>()();
if (selected_input < 0 || selected_input >= data_input_impls_.size()) {
return errors::InvalidArgument(
"Selector index out of range: ", selected_input,
" >= ", data_input_impls_.size());
}
if (data_input_impls_[selected_input]) {
bool end_of_selected_input = false;
TF_RETURN_IF_ERROR(data_input_impls_[selected_input]->GetNext(
&input_contexts_[selected_input + 1], out_tensors,
&end_of_selected_input));
ctx->MergeCheckpoint(
input_contexts_[selected_input + 1].checkpoint());
if (!end_of_selected_input) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(data_input_impls_[selected_input]->prefix());
if (dataset()->stop_on_empty_dataset_) {
*end_of_sequence = true;
ResetInputs();
return absl::OkStatus();
}
data_input_impls_[selected_input].reset();
--num_active_inputs_;
if (num_active_inputs_ == 0) {
selector_input_impl_.reset();
*end_of_sequence = true;
return absl::OkStatus();
}
}
VLOG(2) << "DirectedInterleave selected an exhausted input: "
<< selected_input;
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args),
{model::MakeNonTunableParameter(kCycleLength, 1)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kSelectorInputImplEmpty),
static_cast<int64_t>(!selector_input_impl_)));
if (selector_input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, selector_input_impl_));
}
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
const auto& data_input_impl = data_input_impls_[i];
TF_RETURN_IF_ERROR(writer->WriteScalar(
full_name(strings::StrCat(kDataInputImplEmpty, "[", i, "]")),
static_cast<int64_t>(!data_input_impl)));
if (data_input_impl) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, data_input_impl));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kSelectorInputImplEmpty), &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, selector_input_impl_));
} else {
selector_input_impl_.reset();
}
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
TF_RETURN_IF_ERROR(reader->ReadScalar(
full_name(strings::StrCat(kDataInputImplEmpty, "[", i, "]")),
&input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, data_input_impls_[i]));
} else {
data_input_impls_[i].reset();
}
}
return absl::OkStatus();
}
private:
void ResetInputs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
selector_input_impl_.reset();
for (auto& data_input_impl : data_input_impls_) {
data_input_impl.reset();
}
num_active_inputs_ = 0;
}
mutex mu_;
std::vector<IteratorContext> input_contexts_;
std::unique_ptr<IteratorBase> selector_input_impl_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<IteratorBase>> data_input_impls_
TF_GUARDED_BY(mu_);
int64_t num_active_inputs_ TF_GUARDED_BY(mu_);
};
static PartialTensorShape MostSpecificCompatibleShape(
const PartialTensorShape& ts1, const PartialTensorShape& ts2) {
PartialTensorShape output_tensorshape;
if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
return output_tensorshape;
auto dims1 = ts1.dim_sizes();
auto dims2 = ts2.dim_sizes();
for (int d = 0; d < ts1.dims(); ++d) {
if (dims1[d] == dims2[d])
output_tensorshape.Concatenate(dims1[d]);
else
output_tensorshape.Concatenate(-1);
}
return output_tensorshape;
}
const DatasetBase* const selector_input_;
const std::vector<DatasetBase*> data_inputs_;
std::vector<PartialTensorShape> output_shapes_;
const bool stop_on_empty_dataset_;
};
DirectedInterleaveDatasetOp::DirectedInterleaveDatasetOp(
OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
if (ctx->HasAttr(kStopOnEmptyDataset)) {
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kStopOnEmptyDataset, &stop_on_empty_dataset_));
}
}
void DirectedInterleaveDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* selector_input;
OP_REQUIRES_OK(ctx,
GetDatasetFromVariantTensor(ctx->input(0), &selector_input));
OP_REQUIRES(
ctx,
selector_input->output_dtypes().size() == 1 &&
selector_input->output_dtypes()[0] == DT_INT64 &&
selector_input->output_shapes().size() == 1 &&
selector_input->output_shapes()[0].IsCompatibleWith(
PartialTensorShape({})),
errors::InvalidArgument(
"The selector input must be a dataset of scalar int64 elements."));
std::vector<DatasetBase*> data_inputs;
for (size_t i = 1; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
data_inputs.push_back(input);
OP_REQUIRES(ctx, data_inputs[0]->output_dtypes() == input->output_dtypes(),
errors::InvalidArgument(
"All inputs must have the same output_dtypes. First input "
"has types ",
DataTypeVectorString(data_inputs[0]->output_dtypes()),
", and input ", i - 1, " has types ",
DataTypeVectorString(input->output_dtypes())));
}
*output = new Dataset(ctx, selector_input, std::move(data_inputs),
stop_on_empty_dataset_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("DirectedInterleaveDataset").Device(DEVICE_CPU),
DirectedInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalDirectedInterleaveDataset").Device(DEVICE_CPU),
DirectedInterleaveDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "directed_interleave_dataset";
class DirectedInterleaveDatasetParams : public DatasetParams {
public:
template <typename S, typename T>
DirectedInterleaveDatasetParams(S selector_input_dataset_params,
std::vector<T> input_dataset_params_vec,
bool stop_on_empty_dataset,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_input_datasets, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
stop_on_empty_dataset_(stop_on_empty_dataset),
num_input_datasets_(input_dataset_params_vec.size()) {
input_dataset_params_.push_back(
std::make_unique<S>(selector_input_dataset_params));
for (auto input_dataset_params : input_dataset_params_vec) {
input_dataset_params_.push_back(
std::make_unique<T>(input_dataset_params));
}
if (!input_dataset_params_vec.empty()) {
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params_vec[0].dataset_type(),
input_dataset_params_vec[0].iterator_prefix());
}
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(
DirectedInterleaveDatasetOp::kSelectorInputDataset);
for (int i = 0; i < num_input_datasets_; ++i) {
input_names->emplace_back(absl::StrCat(
DirectedInterleaveDatasetOp::kDataInputDatasets, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kOutputTypes,
output_dtypes_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kOutputShapes,
output_shapes_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kNumInputDatasets,
num_input_datasets_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kStopOnEmptyDataset,
stop_on_empty_dataset_);
return absl::OkStatus();
}
string dataset_type() const override {
return DirectedInterleaveDatasetOp::kDatasetType;
}
private:
bool stop_on_empty_dataset_;
int32 num_input_datasets_;
};
class DirectedInterleaveDatasetOpTest : public DatasetOpsTestBase {};
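// Selector {0, 1, 0, 1, 0, 1} alternates between the two Range inputs, so the
// expected output interleaves {0, 1, 2} with {10, 11, 12}.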
DirectedInterleaveDatasetParams AlternateInputsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams SelectExhaustedInputParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 2, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams OneInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 6, 1)},
false,
{DT_INT64},
{PartialTensorShape({})},
1,
kNodeName);
}
DirectedInterleaveDatasetParams ZeroInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{},
false,
{DT_INT64},
{PartialTensorShape({})},
0,
kNodeName);
}
DirectedInterleaveDatasetParams StopOnEmptyDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 50, 1)},
true,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams SkipEmptyDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 50, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
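// StopOnEmptyDatasetParams and SkipEmptyDatasetParams share the selector
// {0, 0, 0, 0, 0, 1}: stopping ends the stream at {0, 1, 2} once the first
// input runs dry, while skipping retires that input and continues with {10}.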
DirectedInterleaveDatasetParams EmptyInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 0, 1),
RangeDatasetParams(10, 50, 1)},
true,
{DT_INT64},
{PartialTensorShape({})},
0,
kNodeName);
}
DirectedInterleaveDatasetParams LargeNumInputDatasetsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
5,
kNodeName);
}
DirectedInterleaveDatasetParams SmallNumInputDatasetsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
1,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorOutputDataType() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int32>(TensorShape{6}, {0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorOutputShape() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6, 1},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorValues() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{2, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidInputDatasetsDataType() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{
RangeDatasetParams(0, 3, 1, {DT_INT32}),
RangeDatasetParams(10, 13, 1, {DT_INT64})},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
std::vector<GetNextTestCase<DirectedInterleaveDatasetParams>>
GetNextTestCases() {
return {{AlternateInputsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}},
{SelectExhaustedInputParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {12}})}},
{OneInputDatasetParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {3}, {4}, {5}})}},
{StopOnEmptyDatasetParams(),
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}})}},
{SkipEmptyDatasetParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {10}})}},
{EmptyInputDatasetParams(),
{CreateTensors<int64_t>(TensorShape({}), {})}},
{LargeNumInputDatasetsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}},
{SmallNumInputDatasetsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}}};
}
ITERATOR_GET_NEXT_TEST_P(DirectedInterleaveDatasetOpTest,
DirectedInterleaveDatasetParams, GetNextTestCases())
TEST_F(DirectedInterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(DirectedInterleaveDatasetOp::kDatasetType)));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(DirectedInterleaveDatasetOpTest, Cardinality) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(DirectedInterleaveDatasetOp::kDatasetType,
dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<DirectedInterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{AlternateInputsParams(),
{0, 5, 8},
CreateTensors<int64_t>(TensorShape{}, {{0}, {10}, {1}, {11}, {2}, {12}}),
true},
{SelectExhaustedInputParams(),
{0, 4, 8},
CreateTensors<int64_t>(TensorShape{}, {{0}, {10}, {1}, {11}, {12}}),
true},
{OneInputDatasetParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}})}},
{StopOnEmptyDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}},
{SkipEmptyDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {10}})}},
{EmptyInputDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {})}},
{LargeNumInputDatasetsParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {10}, {1}, {11}, {2}, {12}})}},
{SmallNumInputDatasetsParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {10}, {1}, {11}, {2}, {12}})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(DirectedInterleaveDatasetOpTest,
DirectedInterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(DirectedInterleaveDatasetOpTest, InvalidArguments) {
std::vector<DirectedInterleaveDatasetParams> invalid_params_vec = {
InvalidSelectorOuputDataType(), InvalidSelectorOuputShape(),
InvalidInputDatasetsDataType(), ZeroInputDatasetParams()};
for (auto& dataset_params : invalid_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
TEST_F(DirectedInterleaveDatasetOpTest, InvalidSelectorValues) {
auto dataset_params = InvalidSelectorValues();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> next;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a10386a-666e-4dfa-b3e2-222f710c08c7 | cpp | tensorflow/tensorflow | random_dataset_op | tensorflow/core/kernels/data/experimental/random_dataset_op.cc | tensorflow/core/kernels/data/experimental/random_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const RandomDatasetOp::kDatasetType;
constexpr const char* const RandomDatasetOp::kSeed;
constexpr const char* const RandomDatasetOp::kSeed2;
constexpr const char* const RandomDatasetOp::kOutputTypes;
constexpr const char* const RandomDatasetOp::kOutputShapes;
constexpr const char* const
RandomDatasetOp::kRerandomizeEachIteration;
namespace {
constexpr char kRandomDatasetV1[] = "RandomDataset";
constexpr char kRandomDatasetV2[] = "RandomDatasetV2";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kNumRandomSamples[] = "num_random_samples";
}
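// Dataset that yields an unbounded stream of pseudorandom int64 scalars. Seeds
// come either directly from the op inputs (v1) or from a shared
// SeedGeneratorManager resource (v2), which also controls whether each
// iteration over the dataset is re-randomized.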
class RandomDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, RandomSeeds&& seeds,
SeedGeneratorManager* manager, ResourceHandle&& resource_handle,
bool owns_resource, int op_version)
: DatasetBase(DatasetContext(ctx)),
seeds_(std::move(seeds)),
op_version_(op_version),
manager_(manager),
resource_handle_(resource_handle),
resource_mgr_(ctx->resource_manager()),
owns_resource_(owns_resource) {}
~Dataset() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s;
}
}
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(kint64max));
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, strings::StrCat(prefix, "::Random")},
manager_->get().get());
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_INT64});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(seeds_.input_seed(), seeds_.input_seed2());
return name_utils::DatasetDebugString(RandomDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return kInfiniteCardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
if (op_version_ == 1) {
return b->AddDataset(this, {seed_node, seed2_node}, output);
}
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue rerandomize_each_iteration;
b->BuildAttrValue(manager_->get()->reshuffle_each_iteration(),
&rerandomize_each_iteration);
return b->AddDataset(
this, {seed_node, seed2_node, resource_handle_node},
{std::make_pair(kRerandomizeEachIteration, rerandomize_each_iteration)},
output);
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<Dataset>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator_->seed(), seed_generator_->seed2()),
generator_(&parent_generator_) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
out_tensors->reserve(1);
mutex_lock l(mu_);
out_tensors->emplace_back(ctx->allocator({}), DT_INT64, TensorShape({}));
out_tensors->back().scalar<int64_t>()() = Random();
*end_of_sequence = false;
return absl::OkStatus();
}
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kEpochNumRandomSamples),
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kNumRandomSamples),
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed), seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed2), seed2_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kEpochNumRandomSamples),
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kNumRandomSamples),
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed), &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed2), &seed2_));
ResetRngs();
return absl::OkStatus();
}
protected:
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
private:
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
};
private:
const RandomSeeds seeds_;
const int op_version_;
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const bool owns_resource_;
};
RandomDatasetOp::RandomDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kRandomDatasetV2) {
op_version_ = 2;
} else if (op_name == kRandomDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kRerandomizeEachIteration)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRerandomizeEachIteration,
&rerandomize_each_iteration_));
}
}
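// Parses the scalar seed inputs and, for op version 2, resolves the seed
// generator resource from input 2. If no existing resource is found (or for
// v1), a new SeedGeneratorManager owned by the dataset is created, wrapping a
// RandomSeedGenerator or FixedSeedGenerator depending on
// rerandomize_each_iteration.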
void RandomDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed", &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed2", &seed2));
RandomSeeds seeds(seed, seed2);
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
SeedGeneratorManager* manager = nullptr;
ResourceHandle handle;
bool owns_resource = true;
if (op_version_ == 2) {
OP_REQUIRES_OK(ctx, HandleFromInput(ctx, 2, &handle));
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
} else {
OP_REQUIRES_OK(ctx, s);
}
}
if (owns_resource) {
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[rerandomize = rerandomize_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (rerandomize) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
}
*output = new RandomDatasetOp::Dataset(ctx, std::move(seeds), manager,
std::move(handle), owns_resource,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV1).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV2).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalRandomDataset").Device(DEVICE_CPU),
RandomDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "random_dataset";
constexpr char kIteratorPrefix[] = "Iterator";
constexpr int kCount = 10;
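// Reproduces the values the dataset is expected to emit by running the same
// Philox-based single-sample generator with the given seeds.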
void GenerateExpectedEpochData(int64_t seed, int64_t seed2, int count,
std::vector<Tensor>* epoch_data) {
auto parent_generator = random::PhiloxRandom(seed, seed2);
auto generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
for (int i = 0; i < count; ++i) {
epoch_data->push_back(
CreateTensor<int64_t>(TensorShape({}), {generator()}));
}
}
std::vector<Tensor> GenerateExpectedData(int64_t seed, int64_t seed2, int count,
bool rerandomize_each_iteration,
int iterations) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
std::vector<Tensor> ret;
for (int j = 0; j < iterations; ++j) {
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
GenerateExpectedEpochData(seed, seed2, count, &ret);
}
return ret;
}
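// Expected data for the save/restore tests; with re-randomization enabled the
// parent seed generator is advanced twice before the epoch data is generated.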
std::vector<Tensor> GenerateExpectedSaveAndRestoreData(
int64_t seed, int64_t seed2, int count, bool rerandomize_each_iteration) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
std::vector<Tensor> ret;
GenerateExpectedEpochData(seed, seed2, count, &ret);
return ret;
}
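// Test parameters covering both op versions; v2 additionally feeds a seed
// generator resource input and sets the rerandomize_each_iteration attribute.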
class RandomDatasetParams : public DatasetParams {
public:
RandomDatasetParams(int64_t seed, int64_t seed2, int32_t op_version,
bool rerandomize_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
seed_(CreateTensor<int64_t>(TensorShape({}), {seed})),
seed2_(CreateTensor<int64_t>(TensorShape({}), {seed2})),
dummy_resource_handle_(CreateDummyResourceHandle()),
seed_generator_resource_(CreateTensor<ResourceHandle>(
TensorShape({}), {dummy_resource_handle_})),
rerandomize_each_iteration_(rerandomize_each_iteration) {
op_version_ = op_version;
}
ResourceHandle CreateDummyResourceHandle() { return ResourceHandle(); }
  std::vector<Tensor> GetInputTensors() const override {
return {seed_, seed2_, seed_generator_resource_};
}
  Status GetInputNames(
std::vector<string>* input_names) const override {
*input_names = {RandomDatasetOp::kSeed, RandomDatasetOp::kSeed2};
if (op_version_ == 2) {
input_names->emplace_back("seed_generator");
}
return absl::OkStatus();
}
  Status GetAttributes(AttributeVector* attributes) const override {
*attributes = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
if (op_version_ == 2) {
attributes->emplace_back("rerandomize_each_iteration",
rerandomize_each_iteration_);
}
return absl::OkStatus();
}
  string dataset_type() const override {
return RandomDatasetOp::kDatasetType;
}
private:
Tensor seed_;
Tensor seed2_;
ResourceHandle dummy_resource_handle_;
Tensor seed_generator_resource_;
bool rerandomize_each_iteration_;
};
class RandomDatasetOpTest : public DatasetOpsTestBase {};
RandomDatasetParams FortyTwo() {
  return {/*seed=*/42,
          /*seed2=*/42,
          /*op_version=*/1,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeed() {
return {1000,
42,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2() {
return {42,
1000,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationFalse() {
return {42,
42,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationFalse() {
return {1000,
42,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationFalse() {
return {42,
1000,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationTrue() {
return {42,
42,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationTrue() {
return {1000,
42,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationTrue() {
return {42,
1000,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
class ParameterizedGetNextTest : public RandomDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RandomDatasetParams>> {};
std::vector<GetNextTestCase<RandomDatasetParams>> GetNextTestCases() {
return {{FortyTwo(),
GenerateExpectedData(
42, 42, kCount,
false,
2)},
{ChangeSeed(),
GenerateExpectedData(
1000, 42, kCount,
false,
2)},
{ChangeSeed2(),
GenerateExpectedData(
42, 1000, kCount,
false,
2)},
{FortyTwoV2RerandomizeEachIterationFalse(),
GenerateExpectedData(
42, 42, kCount,
false,
2)},
{ChangeSeedV2RerandomizeEachIterationFalse(),
GenerateExpectedData(
1000, 42, kCount,
false,
2)},
{ChangeSeed2V2RerandomizeEachIterationFalse(),
GenerateExpectedData(
42, 1000, kCount,
false,
2)},
{FortyTwoV2RerandomizeEachIterationTrue(),
GenerateExpectedData(
42, 42, kCount,
true, 2)},
{ChangeSeedV2RerandomizeEachIterationTrue(),
GenerateExpectedData(
1000, 42, kCount,
true, 2)},
{ChangeSeed2V2RerandomizeEachIterationTrue(),
GenerateExpectedData(
42, 1000, kCount,
true, 2)}};
}
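// Runs two epochs: after the first kCount elements the iterator is re-created,
// and the concatenated output must match the expected sequence exactly,
// including order.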
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (out_tensors.size() < kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
while (out_tensors.size() < 2 * kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_ASSERT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(
RandomDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(
std::vector<GetNextTestCase<RandomDatasetParams>>(GetNextTestCases())));
std::vector<DatasetNodeNameTestCase<RandomDatasetParams>>
DatasetNodeNameTestCases() {
return {{FortyTwo(), kNodeName}};
}
DATASET_NODE_NAME_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetNodeNameTestCases());
std::vector<DatasetTypeStringTestCase<RandomDatasetParams>>
DatasetTypeStringTestCases() {
return {{FortyTwo(),
name_utils::OpName(
RandomDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetTypeStringTestCases());
std::vector<DatasetOutputDtypesTestCase<RandomDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputDtypesTestCases());
std::vector<DatasetOutputShapesTestCase<RandomDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputShapesTestCases());
std::vector<CardinalityTestCase<RandomDatasetParams>> CardinalityTestCases() {
return {{FortyTwo(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
CardinalityTestCases());
std::vector<IteratorOutputDtypesTestCase<RandomDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputDtypesTestCases());
std::vector<IteratorOutputShapesTestCase<RandomDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputShapesTestCases());
std::vector<IteratorPrefixTestCase<RandomDatasetParams>>
IteratorOutputPrefixTestCases() {
return {{FortyTwo(),
name_utils::IteratorPrefix(
RandomDatasetOp::kDatasetType, kIteratorPrefix)}};
}
ITERATOR_PREFIX_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputPrefixTestCases());
std::vector<IteratorSaveAndRestoreTestCase<RandomDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FortyTwo(), {2, 5, 8},
GenerateExpectedSaveAndRestoreData(
                 /*seed=*/42, /*seed2=*/42, /*count=*/9,
                 /*rerandomize_each_iteration=*/false)},
{FortyTwoV2RerandomizeEachIterationFalse(),
{2, 5, 8},
GenerateExpectedSaveAndRestoreData(
                 /*seed=*/42, /*seed2=*/42, /*count=*/9,
                 /*rerandomize_each_iteration=*/false)},
{FortyTwoV2RerandomizeEachIterationTrue(),
{2, 5, 8},
GenerateExpectedSaveAndRestoreData(
                 /*seed=*/42, /*seed2=*/42, /*count=*/9,
                 /*rerandomize_each_iteration=*/true)}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorSaveAndRestoreTestCases());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/random_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/random_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c94decb-6d82-4473-ad0d-6c905e6bb8ac | cpp | tensorflow/tensorflow | lookup_ops | tensorflow/core/ops/lookup_ops.cc | tensorflow/core/ops/lookup_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/dataset_stateful_op_allowlist.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
namespace {
Status TwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status ScalarAndTwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
for (int i = 1; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
Status ScalarOutput(InferenceContext* c) {
c->set_output(0, c->Scalar());
return absl::OkStatus();
}
}
REGISTER_OP("LookupTableFind")
.Input("table_handle: Ref(string)")
.Input("keys: Tin")
.Input("default_value: Tout")
.Output("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(2), 1, &unused));
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
Status ValidateTableType(InferenceContext* c,
const ShapeAndType& key_shape_and_type,
const string& key_dtype_attr,
const ShapeAndType& value_shape_and_type,
const string& value_dtype_attr) {
DataType key_dtype;
TF_RETURN_IF_ERROR(c->GetAttr(key_dtype_attr, &key_dtype));
if (key_shape_and_type.dtype != key_dtype) {
return errors::InvalidArgument(
"Trying to read value with wrong dtype. "
"Expected ",
DataTypeString(key_shape_and_type.dtype), " got ",
DataTypeString(key_dtype));
}
DataType value_dtype;
TF_RETURN_IF_ERROR(c->GetAttr(value_dtype_attr, &value_dtype));
if (value_shape_and_type.dtype != value_dtype) {
return errors::InvalidArgument(
"Trying to read value with wrong dtype. "
"Expected ",
DataTypeString(value_shape_and_type.dtype), " got ",
DataTypeString(value_dtype));
}
return absl::OkStatus();
}
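// Checks the handle's key/value shape-and-type metadata against the op's dtype
// attrs and, when both the keys' rank and the table's key suffix rank are
// known, builds the output shape by concatenating the keys' batch prefix with
// the table's value shape.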
Status ValidateTableResourceHandle(InferenceContext* c, ShapeHandle keys,
const string& key_dtype_attr,
const string& value_dtype_attr,
ShapeAndType* output_shape_and_type) {
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data == nullptr || handle_data->size() != 2) {
output_shape_and_type->shape = c->UnknownShape();
output_shape_and_type->dtype = DT_INVALID;
} else {
const ShapeAndType& key_shape_and_type = (*handle_data)[0];
const ShapeAndType& value_shape_and_type = (*handle_data)[1];
TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type, key_dtype_attr,
value_shape_and_type,
value_dtype_attr));
output_shape_and_type->dtype = value_shape_and_type.dtype;
if (c->RankKnown(key_shape_and_type.shape) && c->RankKnown(keys)) {
int keys_rank = c->Rank(keys);
int key_suffix_rank = c->Rank(key_shape_and_type.shape);
if (keys_rank < key_suffix_rank) {
return errors::InvalidArgument(
"Expected keys to have suffix ",
c->DebugString(key_shape_and_type.shape),
" but saw shape: ", c->DebugString(keys));
}
for (int d = 0; d < key_suffix_rank; d++) {
DimensionHandle dim = c->Dim(key_shape_and_type.shape, d);
TF_RETURN_IF_ERROR(
c->ReplaceDim(keys, keys_rank - key_suffix_rank + d, dim, &keys));
}
std::vector<DimensionHandle> keys_prefix_vec;
keys_prefix_vec.reserve(keys_rank - key_suffix_rank);
for (int d = 0; d < keys_rank - key_suffix_rank; ++d) {
keys_prefix_vec.push_back(c->Dim(keys, d));
}
ShapeHandle keys_prefix = c->MakeShape(keys_prefix_vec);
TF_RETURN_IF_ERROR(c->Concatenate(keys_prefix, value_shape_and_type.shape,
&output_shape_and_type->shape));
} else {
output_shape_and_type->shape = c->UnknownShape();
}
}
return absl::OkStatus();
}
REGISTER_OP("LookupTableFindV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Input("default_value: Tout")
.Output("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
ShapeAndType value_shape_and_type;
TF_RETURN_IF_ERROR(ValidateTableResourceHandle(
c,
c->input(1),
"Tin",
"Tout", &value_shape_and_type));
c->set_output(0, value_shape_and_type.shape);
return absl::OkStatus();
});
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableFindV2");
REGISTER_OP("LookupTableInsert")
.Input("table_handle: Ref(string)")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("LookupTableInsertV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
return absl::OkStatus();
});
REGISTER_OP("LookupTableRemoveV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Attr("Tin: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &handle));
return absl::OkStatus();
});
REGISTER_OP("LookupTableSize")
.Input("table_handle: Ref(string)")
.Output("size: int64")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSize");
REGISTER_OP("LookupTableSizeV2")
.Input("table_handle: resource")
.Output("size: int64")
.SetShapeFn(ScalarAndTwoElementVectorInputsAndScalarOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSizeV2");
REGISTER_OP("LookupTableExport")
.Input("table_handle: Ref(string)")
.Output("keys: Tkeys")
.Output("values: Tvalues")
.Attr("Tkeys: type")
.Attr("Tvalues: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle values = c->UnknownShape();
TF_RETURN_IF_ERROR(c->WithRankAtLeast(values, 1, &values));
ShapeHandle keys = c->Vector(c->Dim(values, 0));
c->set_output(0, keys);
c->set_output(1, values);
return absl::OkStatus();
});
REGISTER_OP("LookupTableExportV2")
.Input("table_handle: resource")
.Output("keys: Tkeys")
.Output("values: Tvalues")
.Attr("Tkeys: type")
.Attr("Tvalues: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr && handle_data->size() == 2) {
const ShapeAndType& key_shape_and_type = (*handle_data)[0];
const ShapeAndType& value_shape_and_type = (*handle_data)[1];
TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type,
"Tkeys",
value_shape_and_type,
"Tvalues"));
}
c->set_output(0, c->UnknownShape());
c->set_output(1, c->UnknownShape());
return absl::OkStatus();
});
REGISTER_OP("LookupTableImport")
.Input("table_handle: Ref(string)")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("LookupTableImportV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
ShapeHandle keys;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
ShapeHandle values;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &values));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(keys, 0), c->Dim(values, 0), &unused));
return absl::OkStatus();
});
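// Shape functions for the mutable hash table ops: the output handle is a
// scalar resource whose handle data records the table's key and value shapes
// and dtypes.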
Status MutableHashTableShape(InferenceContext* c, const ShapeHandle& key,
const ShapeHandle& value) {
c->set_output(0, c->Scalar());
ShapeHandle key_s;
TF_RETURN_IF_ERROR(c->WithRankAtMost(key, 1, &key_s));
DataType key_t;
TF_RETURN_IF_ERROR(c->GetAttr("key_dtype", &key_t));
DataType value_t;
TF_RETURN_IF_ERROR(c->GetAttr("value_dtype", &value_t));
c->set_output_handle_shapes_and_types(
0, std::vector<ShapeAndType>{{key_s, key_t}, {value, value_t}});
return absl::OkStatus();
}
Status MutableHashTableShapeFn(InferenceContext* c) {
return MutableHashTableShape(c, c->Scalar(),
c->Scalar());
}
Status MutableHashTableOfTensorsShapeFn(InferenceContext* c) {
PartialTensorShape value_p;
TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_p));
ShapeHandle value_s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(value_p, &value_s));
return MutableHashTableShape(c, c->Scalar(), value_s);
}
Status MutableDenseHashTableShapeFn(InferenceContext* c) {
PartialTensorShape value_p;
TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_p));
ShapeHandle value_s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(value_p, &value_s));
return MutableHashTableShape(c, c->input(0), value_s);
}
REGISTER_OP("HashTable")
.Output("table_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("HashTableV2")
.Output("table_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(ScalarOutput);
REGISTER_OP("AnonymousHashTable")
.Output("table_handle: resource")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(ScalarOutput);
REGISTER_OP("MutableHashTable")
.Output("table_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("MutableHashTableV2")
.Output("table_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(MutableHashTableShapeFn);
REGISTER_OP("AnonymousMutableHashTable")
.Output("table_handle: resource")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.SetIsStateful()
.SetShapeFn(MutableHashTableShapeFn);
REGISTER_OP("MutableHashTableOfTensors")
.Output("table_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("MutableHashTableOfTensorsV2")
.Output("table_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.SetIsStateful()
.SetShapeFn(MutableHashTableOfTensorsShapeFn);
REGISTER_OP("AnonymousMutableHashTableOfTensors")
.Output("table_handle: resource")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.SetIsStateful()
.SetShapeFn(MutableHashTableOfTensorsShapeFn);
REGISTER_OP("MutableDenseHashTable")
.Input("empty_key: key_dtype")
.Output("table_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.Attr("initial_num_buckets: int = 131072")
.Attr("max_load_factor: float = 0.8")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("MutableDenseHashTableV2")
.Input("empty_key: key_dtype")
.Input("deleted_key: key_dtype")
.Output("table_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("use_node_name_sharing: bool = false")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.Attr("initial_num_buckets: int = 131072")
.Attr("max_load_factor: float = 0.8")
.SetIsStateful()
.SetShapeFn(MutableDenseHashTableShapeFn);
REGISTER_OP("AnonymousMutableDenseHashTable")
.Input("empty_key: key_dtype")
.Input("deleted_key: key_dtype")
.Output("table_handle: resource")
.Attr("key_dtype: type")
.Attr("value_dtype: type")
.Attr("value_shape: shape = {}")
.Attr("initial_num_buckets: int = 131072")
.Attr("max_load_factor: float = 0.8")
.SetIsStateful()
.SetShapeFn(MutableDenseHashTableShapeFn);
REGISTER_OP("InitializeTable")
.Input("table_handle: Ref(string)")
.Input("keys: Tkey")
.Input("values: Tval")
.Attr("Tkey: type")
.Attr("Tval: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle keys;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys));
return absl::OkStatus();
});
REGISTER_OP("InitializeTableV2")
.Input("table_handle: resource")
.Input("keys: Tkey")
.Input("values: Tval")
.Attr("Tkey: type")
.Attr("Tval: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
ShapeHandle keys;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys));
return absl::OkStatus();
});
REGISTER_OP("InitializeTableFromTextFile")
.Input("table_handle: Ref(string)")
.Input("filename: string")
.Attr("key_index: int >= -2")
.Attr("value_index: int >= -2")
.Attr("vocab_size: int >= -1 = -1")
.Attr("delimiter: string = '\t'")
.Attr("offset: int = 0")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle));
return absl::OkStatus();
});
REGISTER_OP("InitializeTableFromTextFileV2")
.Input("table_handle: resource")
.Input("filename: string")
.Attr("key_index: int >= -2")
.Attr("value_index: int >= -2")
.Attr("vocab_size: int >= -1 = -1")
.Attr("delimiter: string = '\t'")
.Attr("offset: int = 0")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
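// Shape-inference tests: the resource input's handle data carries the table's
// key/value shapes and dtypes, which the shape functions validate against the
// op's Tin/Tout (or Tkeys/Tvalues) attrs.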
TEST(LookupOpsTest, LookupTableFindV2_ShapeFn) {
ShapeInferenceTestOp op("LookupTableFindV2");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?");
TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableFindV2")
.Input({"table_handle", 0, DT_RESOURCE})
.Input({"keys", 0, DT_INT64})
.Input({"default_value", 0, DT_FLOAT})
.Attr("Tin", DT_INT64)
.Attr("Tout", DT_FLOAT)
.Finalize(&op.node_def));
std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types;
auto set_types = [&op, &types](DataType key_type, DataType value_type) {
types.emplace_back();
auto& table = types.back();
table.emplace_back("[3]", key_type);
table.emplace_back("[4]", value_type);
op.input_resource_handle_shapes_and_types = {&table, nullptr, nullptr};
};
INFER_OK(op, "[];[?,3];[4]", "?");
set_types(DT_INT32, DT_FLOAT);
INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]");
set_types(DT_INT64, DT_INT64);
INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]");
set_types(DT_INT64, DT_FLOAT);
INFER_OK(op, "[];[?,3];[4]", "[d1_0,4]");
INFER_OK(op, "[];[1,3];[4]", "[d1_0,4]");
INFER_OK(op, "[];[1,?];[4]", "[d1_0,4]");
}
TEST(LookupOpsTest, LookupTableExportV2_ShapeFn) {
ShapeInferenceTestOp op("LookupTableExportV2");
TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableExportV2")
.Input({"table_handle", 0, DT_RESOURCE})
.Attr("Tkeys", DT_INT64)
.Attr("Tvalues", DT_FLOAT)
.Finalize(&op.node_def));
std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types;
auto set_types = [&op, &types](DataType key_type, DataType value_type) {
types.emplace_back();
auto& table = types.back();
table.emplace_back("[3]", key_type);
table.emplace_back("[4]", value_type);
op.input_resource_handle_shapes_and_types = {&table};
};
set_types(DT_INT32, DT_FLOAT);
INFER_ERROR("read value with wrong dtype", op, "[]");
set_types(DT_INT64, DT_INT64);
INFER_ERROR("read value with wrong dtype", op, "[]");
set_types(DT_INT64, DT_FLOAT);
INFER_OK(op, "[]", "?;?");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/lookup_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/lookup_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a907909a-7bf7-4305-a605-0a452f58af0a | cpp | tensorflow/tensorflow | sampling_dataset_op | tensorflow/core/kernels/data/experimental/sampling_dataset_op.cc | tensorflow/core/kernels/data/experimental/sampling_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/sampling_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const SamplingDatasetOp::kDatasetType;
constexpr const char* const SamplingDatasetOp::kInputDataset;
constexpr const char* const SamplingDatasetOp::kRate;
constexpr const char* const SamplingDatasetOp::kSeed;
constexpr const char* const SamplingDatasetOp::kSeed2;
constexpr const char* const SamplingDatasetOp::kOutputTypes;
constexpr const char* const SamplingDatasetOp::kOutputShapes;
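// Dataset that Bernoulli-samples its input: each input element is kept
// independently with probability `rate`, using a Philox RNG seeded by `seed`
// and `seed2`.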
class SamplingDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, float rate, int64_t seed, int64_t seed2,
const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)),
rate_(rate),
seeds_(seed, seed2),
input_(input) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::unique_ptr<IteratorBase>(
new Iterator({this, name_utils::IteratorPrefix(kDatasetType, prefix)},
seeds_.first, seeds_.second));
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* rate = nullptr;
Node* seed = nullptr;
Node* seed2 = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(rate_, &rate));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.first, &seed));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.second, &seed2));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, rate, seed, seed2}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params, int64_t seed, int64_t seed2)
: DatasetIterator<Dataset>(params),
seeds_(MaybeOverrideSeeds({seed, seed2})),
parent_generator_(seeds_.first, seeds_.second),
generator_(&parent_generator_) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
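// Pulls elements from the input iterator, keeping each one only when the next
// uniform random draw falls below the sampling rate; rejected elements are
// discarded and the loop continues.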
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
bool rand_val_hit;
do {
{
tf_shared_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
}
if (*end_of_sequence) {
mutex_lock l(mu_);
input_impl_.reset();
return absl::OkStatus();
}
float rand_val = Random();
rand_val_hit = rand_val < dataset()->rate_;
if (!rand_val_hit) {
out_tensors->clear();
}
} while (!rand_val_hit);
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seeds_.first, seeds_.second);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
this->full_name("num_random_samples"), num_random_samples_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->full_name("seed"), seeds_.first));
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->full_name("seed2"), seeds_.second));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name("input_impl_empty"), ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->full_name("num_random_samples"), &num_random_samples_));
int64_t seed;
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name("seed"), &seed));
int64_t seed2;
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name("seed2"), &seed2));
seeds_ = {seed, seed2};
ResetRngs();
if (!reader->Contains(full_name("input_impl_empty"))) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
mutex mu_;
std::pair<int64_t, int64_t> seeds_ TF_GUARDED_BY(mu_);
private:
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
float Random() {
mutex_lock l(mu_);
num_random_samples_++;
uint32 random_uint = generator_();
return random::Uint32ToFloat(random_uint);
}
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
};
const float rate_;
const std::pair<int64_t, int64_t> seeds_;
const DatasetBase* const input_;
};
SamplingDatasetOp::SamplingDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void SamplingDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
float rate;
int64_t seed;
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<float>(ctx, kRate, &rate));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
*output = new Dataset(ctx, rate, seed, seed2, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SamplingDataset").Device(DEVICE_CPU),
SamplingDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/sampling_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "sampling_dataset";
constexpr int64_t kRandomSeed = 42;
constexpr int64_t kRandomSeed2 = 7;
class SamplingDatasetParams : public DatasetParams {
public:
template <typename T>
SamplingDatasetParams(T input_dataset_params, float rate,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
rate_(rate) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor rate = CreateTensor<float>(TensorShape({}), {rate_});
Tensor seed_tensor = CreateTensor<int64_t>(TensorShape({}), {seed_tensor_});
Tensor seed2_tensor =
CreateTensor<int64_t>(TensorShape({}), {seed2_tensor_});
return {rate, seed_tensor, seed2_tensor};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {SamplingDatasetOp::kInputDataset, SamplingDatasetOp::kRate,
SamplingDatasetOp::kSeed, SamplingDatasetOp::kSeed2};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{SamplingDatasetOp::kOutputTypes, output_dtypes_},
{SamplingDatasetOp::kOutputShapes, output_shapes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return SamplingDatasetOp::kDatasetType;
}
private:
float rate_;
int64_t seed_tensor_ = kRandomSeed;
int64_t seed2_tensor_ = kRandomSeed2;
};
class SamplingDatasetOpTest : public DatasetOpsTestBase {};
SamplingDatasetParams OneHundredPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 3, 1),
                               /*rate=*/1.0,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
SamplingDatasetParams TenPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 20, 1),
                               /*rate=*/0.1,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
SamplingDatasetParams ZeroPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 20, 1),
                               /*rate=*/0.0,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<SamplingDatasetParams>> GetNextTestCases() {
return {
{OneHundredPercentSampleParams(),
CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}})},
{TenPercentSampleParams(),
CreateTensors<int64_t>(TensorShape({}),
{{9}, {11}, {19}})},
{ZeroPercentSampleParams(), {}}};
}
ITERATOR_GET_NEXT_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
GetNextTestCases())
std::vector<DatasetNodeNameTestCase<SamplingDatasetParams>>
DatasetNodeNameTestCases() {
return {{TenPercentSampleParams(),
kNodeName}};
}
DATASET_NODE_NAME_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetNodeNameTestCases())
std::vector<DatasetTypeStringTestCase<SamplingDatasetParams>>
DatasetTypeStringTestCases() {
return {{TenPercentSampleParams(),
name_utils::OpName(
SamplingDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetTypeStringTestCases())
std::vector<DatasetOutputDtypesTestCase<SamplingDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{TenPercentSampleParams(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SamplingDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TenPercentSampleParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SamplingDatasetParams>> CardinalityTestCases() {
return {{OneHundredPercentSampleParams(),
kUnknownCardinality},
{TenPercentSampleParams(),
kUnknownCardinality},
{ZeroPercentSampleParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<SamplingDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{TenPercentSampleParams(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<SamplingDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TenPercentSampleParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<SamplingDatasetParams>>
IteratorOutputPrefixTestCases() {
return {{TenPercentSampleParams(),
name_utils::IteratorPrefix(
SamplingDatasetOp::kDatasetType,
TenPercentSampleParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<SamplingDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{OneHundredPercentSampleParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{TenPercentSampleParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{9}, {11}, {19}})},
{ZeroPercentSampleParams(),
{0, 2, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/sampling_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/sampling_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27ddbda0-b0b4-4bd1-becc-a1d64dcc555f | cpp | tensorflow/tensorflow | assert_prev_dataset_op | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.cc | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <map>
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char AssertPrevDatasetOp::kInputDataset[];
constexpr char AssertPrevDatasetOp::kDatasetType[];
constexpr char AssertPrevDatasetOp::kTransformations[];
constexpr char AssertPrevDatasetOp::kOutputTypes[];
constexpr char AssertPrevDatasetOp::kOutputShapes[];
namespace {
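// Parses a text-format NameAttrList from the user-supplied transformation
// string; the name is the asserted op and the attrs are the asserted
// attribute values.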
absl::StatusOr<NameAttrList> GetAssertions(const tstring& transformation) {
NameAttrList assertions;
if (!std::is_base_of<protobuf::Message, NameAttrList>()) {
return errors::InvalidArgument(
"Portable proto implementations are not supported.");
}
if (!protobuf::TextFormat::ParseFromString(
transformation, reinterpret_cast<protobuf::Message*>(&assertions))) {
return errors::InvalidArgument("Couldn't parse transformation '",
transformation, "'.");
}
return assertions;
}
absl::StatusOr<const DatasetBase*> GetPreviousDataset(
const DatasetBase& dataset) {
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset.InputDatasets(&inputs));
if (inputs.empty()) {
return errors::InvalidArgument("No previous transformation found.");
}
return inputs.back();
}
Status CheckOpName(const DatasetBase& dataset, const NameAttrList& assertions) {
if (!MatchesAnyVersion(assertions.name(), dataset.type_string())) {
return errors::InvalidArgument("Asserted transformation matching '",
assertions.name(), "', but found '",
dataset.type_string(), "'.");
}
return absl::OkStatus();
}
absl::StatusOr<NodeDef> GetDatasetNode(const DatasetBase& dataset,
absl::string_view op_name) {
SerializationContext serialization_ctx((SerializationContext::Params()));
GraphDefBuilder b;
GraphDef graph_def;
TF_RETURN_IF_ERROR(
AsGraphDef(&dataset, std::move(serialization_ctx), &graph_def));
TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNodeDef(graph_def));
return node;
}
Status CheckAttributes(const DatasetBase& dataset,
const NameAttrList& assertions) {
if (assertions.attr().empty()) return absl::OkStatus();
TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNode(dataset, assertions.name()));
std::vector<std::string> attrs_not_found;
for (const auto& attr : assertions.attr()) {
auto it = node.attr().find(attr.first);
if (it != node.attr().end()) {
if (!std::is_base_of<protobuf::Message, AttrValue>()) {
return errors::InvalidArgument(
"Portable proto implementations are not supported.");
}
if (!protobuf::util::MessageDifferencer::Equivalent(
*reinterpret_cast<const protobuf::Message*>(&it->second),
*reinterpret_cast<const protobuf::Message*>(&attr.second))) {
return errors::InvalidArgument(
"Asserted attribute '", attr.first, "' having a value of '",
attr.second.DebugString(), "', but found value of '",
it->second.DebugString(), "'.");
}
} else {
return errors::InvalidArgument(
"Asserted attribute '", attr.first, "' having a value of '",
attr.second.DebugString(), "', but found no such attribute defined.");
}
}
return absl::OkStatus();
}
Status CheckTransformation(const DatasetBase& dataset,
const tstring& transformation) {
TF_ASSIGN_OR_RETURN(NameAttrList assertions, GetAssertions(transformation));
TF_RETURN_IF_ERROR(CheckOpName(dataset, assertions));
TF_RETURN_IF_ERROR(CheckAttributes(dataset, assertions));
return absl::OkStatus();
}
}
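// Identity dataset that, when its iterator is initialized, asserts that the
// named transformations (with any asserted attrs) appear in order immediately
// upstream of this dataset; otherwise initialization fails with
// InvalidArgument.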
class AssertPrevDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const std::vector<tstring>& transformations,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
transformations_(transformations),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* transformations_node = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, transformations_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
const DatasetBase* current_dataset = dataset();
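      // Walk backwards through the chain of input datasets, checking each
      // asserted transformation against the corresponding upstream dataset.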
for (int i = 0; i < dataset()->transformations_.size(); ++i) {
absl::StatusOr<const DatasetBase*> previous_dataset =
GetPreviousDataset(*current_dataset);
if (!previous_dataset.ok()) {
return errors::InvalidArgument(
"Asserted previous ", dataset()->transformations_.size(),
" transformations but encountered only ", i, ".");
}
Status s = CheckTransformation(**previous_dataset,
dataset()->transformations_[i]);
if (!s.ok()) {
return errors::InvalidArgument(
"Failure checking transformations at offset ", i, ": ",
s.message());
}
current_dataset = *previous_dataset;
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
};
const DatasetBase* input_;
const std::vector<tstring> transformations_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
};
AssertPrevDatasetOp::AssertPrevDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void AssertPrevDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::vector<tstring> transformations;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
&transformations));
*output =
new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AssertPrevDataset").Device(DEVICE_CPU),
AssertPrevDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_prev_dataset";
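// Builds a text-format NameAttrList naming the op `name` (with "Dataset"
// appended) together with any boolean attributes in `attrs`.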
std::string GetTransformation(
absl::string_view name,
std::initializer_list<std::pair<std::string, bool>> attrs = {}) {
NameAttrList message;
message.set_name(absl::StrCat(name, "Dataset"));
for (const auto& attr : attrs) {
AttrValue value;
value.set_b(attr.second);
message.mutable_attr()->insert({attr.first, value});
}
std::string output;
protobuf::TextFormat::PrintToString(message, &output);
return output;
}
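// For example, GetTransformation(TensorSliceDatasetOp::kDatasetType,
// {{"is_files", false}}) produces a text-format string roughly of the form
// (illustrative; the exact layout depends on the proto text printer):
//   name: "TensorSliceDataset"
//   attr { key: "is_files" value { b: false } }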
class AssertPrevDatasetParams : public DatasetParams {
public:
template <typename T>
AssertPrevDatasetParams(T input_dataset_params,
const std::vector<tstring>& transformations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
transformations_(transformations) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
int num_transformations = transformations_.size();
return {CreateTensor<tstring>(TensorShape({num_transformations}),
transformations_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() + 1);
input_names->emplace_back(AssertPrevDatasetOp::kInputDataset);
input_names->emplace_back(AssertPrevDatasetOp::kTransformations);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{AssertPrevDatasetOp::kOutputShapes, output_shapes_},
{AssertPrevDatasetOp::kOutputTypes, output_dtypes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return AssertPrevDatasetOp::kDatasetType;
}
private:
std::vector<tstring> transformations_;
};
class AssertPrevDatasetOpTest : public DatasetOpsTestBase {};
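// In the TakeDatasetParams constructions below, the bare `3` is the take
// count; the remaining arguments are the output dtypes, output shapes, and
// the dataset node name.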
AssertPrevDatasetParams AssertPrevDatasetParams1() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType)},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams AssertPrevDatasetParams2() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(RangeDatasetOp::kDatasetType)},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams AssertPrevDatasetParams2WithAttrs() {
TakeDatasetParams take_dataset_params = TakeDatasetParams(
TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset"),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(TensorSliceDatasetOp::kDatasetType,
{{"is_files", false}})},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams InvalidAssertPrevDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation("Whoops")},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams ShortAssertPrevDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(RangeDatasetOp::kDatasetType),
GetTransformation("Whoops")},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AssertPrevDatasetParams>> GetNextTestCases() {
return {{AssertPrevDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertPrevDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertPrevDatasetOpTest, AssertPrevDatasetParams,
GetNextTestCases())
TEST_F(AssertPrevDatasetOpTest, DatasetNodeName) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertPrevDatasetOpTest, DatasetAttrs) {
auto dataset_params = AssertPrevDatasetParams2WithAttrs();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertPrevDatasetOpTest, DatasetTypeString) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(AssertPrevDatasetOp::kDatasetType)));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertPrevDatasetOpTest, Cardinality) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorPrefix) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
AssertPrevDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<AssertPrevDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{AssertPrevDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertPrevDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertPrevDatasetOpTest,
AssertPrevDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(AssertPrevDatasetOpTest, InvalidArguments) {
auto dataset_params = InvalidAssertPrevDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(AssertPrevDatasetOpTest, ShortAssertPrev) {
auto dataset_params = ShortAssertPrevDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_prev_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37dcc1c8-b16f-4fd8-a653-6211c63fd914 | cpp | tensorflow/tensorflow | auto_shard_dataset_op | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.cc | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const AutoShardDatasetOp::kAutoShardPolicy;
constexpr const char* const AutoShardDatasetOp::kDatasetType;
constexpr const char* const AutoShardDatasetOp::kInputDataset;
constexpr const char* const AutoShardDatasetOp::kNumWorkers;
constexpr const char* const AutoShardDatasetOp::kNumReplicas;
constexpr const char* const AutoShardDatasetOp::kIndex;
constexpr const char* const AutoShardDatasetOp::kOutputTypes;
constexpr const char* const AutoShardDatasetOp::kOutputShapes;
constexpr char kOptimizerName[] = "tf_auto_shard";
AutoShardDatasetOp::AutoShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), auto_shard_policy_(0) {
if (ctx->HasAttr(kAutoShardPolicy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kAutoShardPolicy, &auto_shard_policy_));
}
if (ctx->HasAttr(kNumReplicas)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kNumReplicas, &num_replicas_));
}
}
void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index, num_workers, auto_shard_policy, num_replicas;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers));
OP_REQUIRES(
ctx, num_workers > 0,
errors::InvalidArgument("num_workers must be greater than zero."));
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kIndex, &index));
OP_REQUIRES(
ctx, index >= 0 && index < num_workers,
errors::InvalidArgument("index must be between 0 and ", num_workers - 1));
auto_shard_policy = auto_shard_policy_;
if (input->options().distribute_options().auto_shard_policy() !=
AutoShardPolicy::AUTO) {
auto_shard_policy =
input->options().distribute_options().auto_shard_policy();
}
num_replicas = num_replicas_;
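  // Rewrite the input pipeline with the tf_auto_shard grappler optimizer,
  // which inserts the appropriate shard (or rebatch) transformations.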
auto config_factory = [num_workers, index, auto_shard_policy,
num_replicas]() {
return CreateConfig(num_workers, index, auto_shard_policy, num_replicas);
};
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten));
*output = rewritten.release();
}
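// Builds a RewriterConfig that runs the tf_auto_shard optimizer once, passing
// the sharding parameters through the custom optimizer's parameter map.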
RewriterConfig AutoShardDatasetOp::CreateConfig(int64_t num_workers,
int64_t index,
int64_t auto_shard_policy,
int64_t num_replicas) {
RewriterConfig rewriter_config;
rewriter_config.set_fail_on_optimizer_errors(true);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.add_optimizers(kOptimizerName);
auto custom_optimizer = rewriter_config.add_custom_optimizers();
custom_optimizer->set_name(kOptimizerName);
const std::array<std::pair<const char* const, int64_t>, 4> attr_pairs = {
{{kNumWorkers, num_workers},
{kIndex, index},
{kAutoShardPolicy, auto_shard_policy},
{kNumReplicas, num_replicas}}};
for (const auto& pair : attr_pairs) {
AttrValue attr;
attr.set_i(pair.second);
(*custom_optimizer->mutable_parameter_map())[pair.first] = attr;
}
return rewriter_config;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalAutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "auto_shard_dataset";
class AutoShardDatasetParams : public DatasetParams {
public:
template <typename T>
AutoShardDatasetParams(T input_dataset_params, int64_t num_workers,
int64_t index, int auto_shard_policy,
int64_t num_replicas, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_workers_(num_workers),
num_replicas_(num_replicas),
index_(index),
auto_shard_policy_(auto_shard_policy) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_workers_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(AutoShardDatasetOp::kInputDataset);
input_names->emplace_back(AutoShardDatasetOp::kNumWorkers);
input_names->emplace_back(AutoShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(AutoShardDatasetOp::kAutoShardPolicy,
auto_shard_policy_);
attr_vector->emplace_back(AutoShardDatasetOp::kNumReplicas, num_replicas_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputShapes,
output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return AutoShardDatasetOp::kDatasetType;
}
private:
int64_t num_workers_;
int64_t num_replicas_;
int64_t index_;
int auto_shard_policy_;
};
class AutoShardDatasetOpTest : public DatasetOpsTestBase {};
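// The positional arguments in the factories below are, in order: num_workers,
// index, auto_shard_policy, num_replicas, followed by the output dtypes,
// output shapes, and node name.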
AutoShardDatasetParams AutoShardDatasetParams1() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams2() {
return AutoShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams3() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
0,
4,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams4() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams5() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams6() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams7() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AutoShardDatasetParams>> GetNextTestCases() {
return {
{AutoShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{AutoShardDatasetParams2(),
{}},
{AutoShardDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})}};
}
ITERATOR_GET_NEXT_TEST_P(AutoShardDatasetOpTest, AutoShardDatasetParams,
GetNextTestCases())
TEST_F(AutoShardDatasetOpTest, InvalidArguments) {
std::vector<AutoShardDatasetParams> invalid_dataset_params = {
AutoShardDatasetParams4(), AutoShardDatasetParams5(),
AutoShardDatasetParams6(), AutoShardDatasetParams7()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
REGISTER_OP("AutoShardDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
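// Appends an Identity node for each output of `node` so that the full type
// information propagated by type inference can be read off the consumers.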
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
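// Runs the TypeInferencePass over a copy of `graph`.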
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST_F(AutoShardDatasetOpTest, AutoShardDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_workers;
Node* index;
Node* auto_shard_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_workers", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_workers));
TF_EXPECT_OK(NodeBuilder("index", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &index));
TF_EXPECT_OK(NodeBuilder("AutoShardDataset", "AutoShardDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_workers)
.Input(index)
.Finalize(&graph, &auto_shard_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(auto_shard_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_replicas;
Node* rebatch_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_replicas));
TF_EXPECT_OK(NodeBuilder("RebatchDataset", "RebatchDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_replicas)
.Finalize(&graph, &rebatch_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetV2TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_sizes;
Node* drop_remainder;
Node* rebatch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_sizes));
TF_EXPECT_OK(
NodeBuilder("drop_remainder", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("RebatchDatasetV2", "RebatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_sizes)
.Input(drop_remainder)
.Finalize(&graph, &rebatch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/auto_shard_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff12cf31-34c5-4a43-9350-464f3f27d495 | cpp | tensorflow/tensorflow | map_and_batch_dataset_op | tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc | tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.h"
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/inplace_ops_functor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const MapAndBatchDatasetOp::kDatasetType;
constexpr const char* const MapAndBatchDatasetOp::kInputDataset;
constexpr const char* const MapAndBatchDatasetOp::kOtherArguments;
constexpr const char* const MapAndBatchDatasetOp::kBatchSize;
constexpr const char* const
MapAndBatchDatasetOp::kNumParallelCalls;
constexpr const char* const MapAndBatchDatasetOp::kDropRemainder;
constexpr const char* const MapAndBatchDatasetOp::kFunc;
constexpr const char* const MapAndBatchDatasetOp::kTarguments;
constexpr const char* const MapAndBatchDatasetOp::kOutputTypes;
constexpr const char* const MapAndBatchDatasetOp::kOutputShapes;
constexpr const char* const
MapAndBatchDatasetOp::kPreserveCardinality;
namespace {
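// Upper bound on the number of batch results the iterator will buffer at
// once.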
constexpr int64_t kMaxBatchResults = 16;
constexpr char kParallelism[] = "parallelism";
constexpr char kCallCounter[] = "call_counter";
constexpr char kBatchResultsSize[] = "batch_results_size";
constexpr char kTFDataMapAndBatch[] = "tf_data_map_and_batch";
constexpr char kBatchResults[] = "batch_results";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumCalls[] = "num_calls";
constexpr char kNumElements[] = "num_elements";
constexpr char kOutputAllocated[] = "output_allocated";
constexpr char kStatus[] = "status";
inline int64_t CeilDiv(int64_t x, int64_t y) { return (x + y - 1) / y; }
}
class MapAndBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t batch_size,
int64_t num_parallel_calls, bool drop_remainder,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality)
: DatasetBase(DatasetContext(ctx)),
input_(input),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
output_types_(output_types),
output_shapes_(output_shapes),
captured_func_(std::move(captured_func)),
preserve_cardinality_(preserve_cardinality),
traceme_metadata_(
{{"autotune",
num_parallel_calls == model::kAutotune ? "true" : "false"},
{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (!preserve_cardinality_) {
return kUnknownCardinality;
}
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size_node;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size_node));
Node* num_parallel_calls_node;
TF_RETURN_IF_ERROR(
b->AddScalar(num_parallel_calls_, &num_parallel_calls_node));
Node* drop_remainder_node;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node),
std::make_pair(2, batch_size_node),
std::make_pair(3, num_parallel_calls_node),
std::make_pair(4, drop_remainder_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f),
std::make_pair(kTarguments, other_arguments_types_attr),
std::make_pair(kPreserveCardinality,
preserve_cardinality_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)) {
max_batch_results_ = std::min(
kMaxBatchResults,
CeilDiv(params.dataset->num_parallel_calls_ == model::kAutotune
? GetCpuBudget()
: params.dataset->num_parallel_calls_,
params.dataset->batch_size_));
}
~Iterator() override {
CancelThreads(true);
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx.checkpoint());
TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_));
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<BatchResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
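      // Block until the oldest buffered batch has no map calls still pending,
      // or the iterator is cancelled.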
while (!cancelled_ && (batch_results_.empty() ||
batch_results_.front()->num_calls > 0)) {
++waiting_;
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
--waiting_;
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
std::swap(result, batch_results_.front());
batch_results_.pop_front();
cond_var_->notify_all();
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("MapAndBatchConsume",
{{"element_id", result->uid}});
});
auto cleanup = gtl::MakeCleanup([result] { result->output.clear(); });
mutex_lock l(result->mu);
if (result->output_allocated) {
RecordBufferDequeue(ctx, result->output);
}
ctx->MergeCheckpoint(&result->checkpoint);
TF_RETURN_IF_ERROR(
ProcessBatch(dataset()->batch_size_, result->num_elements,
dataset()->drop_remainder_, result->status, ctx,
out_tensors, end_of_sequence, &result->output));
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncKnownRatioNode(
std::move(args), dataset()->batch_size_,
{model::MakeParameter(kParallelism, num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
if (ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCallCounter, 0));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize, 0));
return absl::OkStatus();
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
DCHECK_EQ(num_calls_, 0);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCallCounter, call_counter_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize,
batch_results_.size()));
for (size_t i = 0; i < batch_results_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteBatchResult(writer, i));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
DCHECK(!runner_thread_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCallCounter, &call_counter_));
int64_t batch_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBatchResultsSize, &batch_results_size));
DCHECK(batch_results_.empty());
for (int i = 0; i < batch_results_size; ++i) {
TF_RETURN_IF_ERROR(ReadBatchResult(ctx, reader, i));
}
if (ctx->warm_start()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
int64_t max_batch_results = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
max_batch_results = max_batch_results_;
mu_->unlock();
}
auto result = dataset()->traceme_metadata_;
result.push_back(std::make_pair(
"max_batch_results",
strings::Printf("%lld", static_cast<long long>(max_batch_results))));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
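    // State of one in-flight batch: the allocated output tensors, the number
    // of map calls still outstanding, and the first non-OK status observed
    // (tracked together with the element offset at which it occurred).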
struct BatchResult {
explicit BatchResult(int64_t batch_size, IteratorContext* ctx)
: end_of_input(false),
num_elements(0),
output_allocated(false),
status(absl::OkStatus()),
status_offset(-1),
num_calls(batch_size),
checkpoint(MemoryCheckpoint{ctx->id_registry()}),
uid(tensorflow::EnvTime::NowNanos()) {}
void UpdateStatus(const Status& s, int64_t offset) {
if (TF_PREDICT_FALSE(!s.ok())) {
mutex_lock l(mu);
if (status.ok() || offset < status_offset) {
status = s;
status_offset = offset;
}
}
}
mutex mu;
bool end_of_input TF_GUARDED_BY(mu);
int64_t num_elements TF_GUARDED_BY(mu);
std::vector<Tensor> output;
bool output_allocated TF_GUARDED_BY(mu);
Status status TF_GUARDED_BY(mu);
int64_t status_offset TF_GUARDED_BY(mu);
int64_t num_calls TF_GUARDED_BY(&Iterator::mu_);
MemoryCheckpoint checkpoint TF_GUARDED_BY(mu);
const uint64 uid = -1;
};
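    // Marks one map call as finished, updating the per-batch and global
    // outstanding-call counters and waking any waiting threads.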
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->num_calls--;
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
stats_aggregator->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls_) /
static_cast<float>(num_parallel_calls_->value),
num_elements());
}
cond_var_->notify_all();
}
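    // Pulls the next input element and runs the map function asynchronously;
    // the function's output is copied into slice `offset` of the batch owned
    // by `result`.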
void CallFunction(std::shared_ptr<IteratorContext> ctx,
const std::shared_ptr<BatchResult>& result,
int64_t offset) TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("MapAndBatchProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
bool end_of_input = false;
Status status =
input_impl_->GetNext(ctx.get(), &input_element, &end_of_input);
bool return_early;
{
mutex_lock l(result->mu);
result->checkpoint.Merge(ctx->checkpoint());
result->end_of_input = result->end_of_input || end_of_input;
result->status.Update(status);
return_early = result->end_of_input || !result->status.ok();
}
if (return_early) {
CallCompleted(ctx, result);
return;
}
std::shared_ptr<std::vector<Tensor>> return_values =
std::make_shared<std::vector<Tensor>>();
auto done = [this, ctx, result, return_values, offset](Status status) {
if (dataset()->preserve_cardinality_ && errors::IsOutOfRange(status)) {
status = errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
status.message());
}
result->UpdateStatus(status, offset);
if (status.ok()) {
Status allocate_status =
EnsureOutputAllocated(ctx, result, return_values);
if (!allocate_status.ok()) {
result->UpdateStatus(allocate_status, offset);
} else {
for (size_t i = 0; i < return_values->size(); ++i) {
Tensor& tensor = return_values->at(i);
Tensor* batch = &(result->output)[i];
if (tensor.NumElements() !=
(batch->NumElements() / batch->dim_size(0))) {
TensorShape batch_shape = batch->shape();
batch_shape.RemoveDim(0);
result->UpdateStatus(
errors::InvalidArgument(
"Cannot add tensor to the batch: number of elements "
"does not match. Shapes are: [tensor]: ",
tensor.shape().DebugString(),
", [batch]: ", batch_shape.DebugString()),
offset);
break;
}
Status copy_status = batch_util::CopyElementToSlice(
std::move(tensor), batch, offset);
if (!copy_status.ok()) {
result->UpdateStatus(copy_status, offset);
break;
}
}
}
{
mutex_lock l(result->mu);
result->num_elements++;
}
}
CallCompleted(ctx, result);
};
instantiated_captured_func_->RunAsync(ctx.get(), std::move(input_element),
return_values.get(),
std::move(done), model_node());
}
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto new_ctx = std::make_shared<IteratorContext>(*ctx);
runner_thread_ =
ctx->StartThread(kTFDataMapAndBatch,
std::bind(&Iterator::RunnerThread, this, new_ctx));
}
}
Status EnsureOutputAllocated(
const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result,
const std::shared_ptr<std::vector<Tensor>>& return_values) {
mutex_lock l(result->mu);
if (result->output_allocated) {
return absl::OkStatus();
}
const size_t num_components = return_values->size();
result->output.reserve(num_components);
for (size_t i = 0; i < num_components; ++i) {
TensorShape component_shape({dataset()->batch_size_});
component_shape.AppendShape(return_values->at(i).shape());
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
result->output.emplace_back(ctx->allocator(attr),
return_values->at(i).dtype(),
component_shape);
if (!result->output.back().IsInitialized()) {
return errors::ResourceExhausted(
"Failed to allocate memory for the batch of component ", i);
}
}
RecordBufferEnqueue(ctx.get(), result->output);
result->output_allocated = true;
return absl::OkStatus();
}
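    // Background thread that schedules map calls, starting a new BatchResult
    // every `batch_size_` calls, subject to the parallelism limit and the cap
    // on buffered batch results.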
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
std::vector<std::pair<std::shared_ptr<BatchResult>, int64_t>> new_calls;
RecordStart(ctx.get());
auto stop_cleanup =
gtl::MakeCleanup([this, &ctx]() { RecordStop(ctx.get()); });
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
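      // New calls are not scheduled while the parallelism limit is reached or
      // the buffer of batch results is full (or starting another call would
      // open a batch beyond the buffer limit).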
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
(batch_results_.size() > max_batch_results_ ||
(batch_results_.size() == max_batch_results_ &&
call_counter_ % dataset()->batch_size_ == 0));
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
if (waiting_ > 0 && num_calls_ < num_parallel_calls_->value &&
max_batch_results_ < kMaxBatchResults) {
max_batch_results_++;
continue;
}
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
if (call_counter_ % dataset()->batch_size_ == 0) {
batch_results_.push_back(std::make_shared<BatchResult>(
dataset()->batch_size_, ctx.get()));
}
int64_t offset = call_counter_++ % dataset()->batch_size_;
new_calls.emplace_back(batch_results_.back(), offset);
num_calls_++;
}
}
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
mutex_lock l(*mu_);
stats_aggregator->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls_) /
static_cast<float>(num_parallel_calls_->value),
num_elements());
}
for (const auto& call : new_calls) {
CallFunction(ctx, call.first, call.second);
}
new_calls.clear();
}
}
Status ReadBatchResult(IteratorContext* ctx, IteratorStateReader* reader,
size_t index) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
batch_results_.push_back(
std::make_shared<BatchResult>(dataset()->batch_size_, ctx));
std::shared_ptr<BatchResult> result = batch_results_.back();
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
result->end_of_input = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumCalls),
&result->num_calls));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
&result->num_elements));
result->output_allocated = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated));
TF_RETURN_IF_ERROR(ReadBatch(ctx, reader, dataset()->batch_size_,
prefix(), batch_prefix, &result->output));
TF_RETURN_IF_ERROR(ReadStatus(prefix(),
strings::StrCat(batch_prefix, "_", kStatus),
reader, &result->status));
if (result->output_allocated) {
RecordBufferEnqueue(ctx, result->output);
}
return absl::OkStatus();
}
Status WriteBatchResult(IteratorStateWriter* writer, size_t index)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
std::shared_ptr<BatchResult> result = batch_results_[index];
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
if (result->end_of_input) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumCalls),
result->num_calls));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
result->num_elements));
if (result->output_allocated) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated),
""));
}
TF_RETURN_IF_ERROR(WriteBatch(dataset()->batch_size_,
result->num_elements, prefix(),
batch_prefix, writer, &result->output));
TF_RETURN_IF_ERROR(
WriteStatus(prefix(), strings::StrCat(batch_prefix, "_", kStatus),
result->status, writer));
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
std::unique_ptr<CancellationManager> cancellation_manager_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
int64_t call_counter_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<BatchResult>> batch_results_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
int64_t waiting_ TF_GUARDED_BY(*mu_) = 0;
int64_t max_batch_results_ TF_GUARDED_BY(*mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t batch_size_;
const int64_t num_parallel_calls_;
const bool drop_remainder_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const std::unique_ptr<CapturedFunction> captured_func_;
const bool preserve_cardinality_;
const TraceMeMetadata traceme_metadata_;
};
MapAndBatchDatasetOp::MapAndBatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_));
}
void MapAndBatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("batch_size must be greater than zero."));
int64_t num_parallel_calls = 0;
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
OP_REQUIRES(
ctx, num_parallel_calls > 0 || num_parallel_calls == model::kAutotune,
errors::InvalidArgument("num_parallel_calls must be greater than zero."));
bool drop_remainder;
OP_REQUIRES_OK(ctx,
ParseScalarArgument(ctx, kDropRemainder, &drop_remainder));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
if (num_parallel_calls == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, batch_size, num_parallel_calls,
drop_remainder, output_types_, output_shapes_,
std::move(captured_func), preserve_cardinality_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapAndBatchDataset").Device(DEVICE_CPU),
MapAndBatchDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalMapAndBatchDataset").Device(DEVICE_CPU),
MapAndBatchDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("MapAndBatchDataset");
REGISTER_INPUT_COLOCATION_EXEMPTION("ExperimentalMapAndBatchDataset");
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "map_and_batch_dataset";
class MapAndBatchDatasetParams : public DatasetParams {
public:
template <typename T>
MapAndBatchDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int64_t batch_size, int64_t num_parallel_calls, bool drop_remainder,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
bool preserve_cardinality, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)),
preserve_cardinality_(preserve_cardinality) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> inputs = other_arguments_;
inputs.emplace_back(CreateTensor<int64_t>(TensorShape({}), {batch_size_}));
inputs.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
inputs.emplace_back(CreateTensor<bool>(TensorShape({}), {drop_remainder_}));
return inputs;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size() + 3);
input_names->emplace_back(MapAndBatchDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(MapAndBatchDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(MapAndBatchDatasetOp::kBatchSize);
input_names->emplace_back(MapAndBatchDatasetOp::kNumParallelCalls);
input_names->emplace_back(MapAndBatchDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"preserve_cardinality", preserve_cardinality_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override {
return MapAndBatchDatasetOp::kDatasetType;
}
private:
std::vector<Tensor> other_arguments_;
int64_t batch_size_;
int64_t num_parallel_calls_;
bool drop_remainder_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
bool preserve_cardinality_;
};
class MapAndBatchDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
const DataType& dtype) {
return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}
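// The positional arguments in the factories below are, in order:
// other_arguments, batch_size, num_parallel_calls, drop_remainder, the map
// function, the function library, type_arguments, preserve_cardinality,
// followed by the output dtypes, output shapes, and node name.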
MapAndBatchDatasetParams MapAndBatchDatasetParams1() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
1,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams2() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
2,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams3() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
3,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams4() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
4,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams5() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
model::kAutotune,
true,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams6() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
4,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams InvalidNumParallelCallsMapAndBatchDatasetParams() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
-4,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams InvalidBatchSizeMapAndBatchDatasetParams() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
-2,
2,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
std::vector<GetNextTestCase<MapAndBatchDatasetParams>> GetNextTestCases() {
return {{MapAndBatchDatasetParams1(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams2(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}},
{MapAndBatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams5(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 8}, {16, 24}})},
{MapAndBatchDatasetParams6(),
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}}};
}
ITERATOR_GET_NEXT_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
GetNextTestCases())
TEST_F(MapAndBatchDatasetOpTest, DatasetNodeName) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(MapAndBatchDatasetOpTest, DatasetTypeString) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(MapAndBatchDatasetOp::kDatasetType)));
}
TEST_F(MapAndBatchDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<MapAndBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{MapAndBatchDatasetParams1(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams2(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams3(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams4(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams5(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams6(),
{PartialTensorShape({2})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<MapAndBatchDatasetParams>>
CardinalityTestCases() {
return {{MapAndBatchDatasetParams1(),
kUnknownCardinality},
{MapAndBatchDatasetParams2(),
2},
{MapAndBatchDatasetParams3(),
3},
{MapAndBatchDatasetParams4(),
kUnknownCardinality},
{MapAndBatchDatasetParams5(),
2},
{MapAndBatchDatasetParams6(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
CardinalityTestCases())
TEST_F(MapAndBatchDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<MapAndBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{MapAndBatchDatasetParams1(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams2(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams3(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams4(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams5(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams6(),
{PartialTensorShape({2})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(MapAndBatchDatasetOpTest,
MapAndBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(MapAndBatchDatasetOpTest, IteratorPrefix) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
MapAndBatchDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<MapAndBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{MapAndBatchDatasetParams1(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams2(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams3(),
{0, 1, 4},
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}},
{MapAndBatchDatasetParams4(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams5(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 8}, {16, 24}})},
{MapAndBatchDatasetParams6(),
{0, 1, 4},
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(MapAndBatchDatasetOpTest,
MapAndBatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(MapAndBatchDatasetOpTest, InvalidBatchSize) {
auto dataset_params = InvalidBatchSizeMapAndBatchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(MapAndBatchDatasetOpTest, InvalidNumParallel) {
auto dataset_params = InvalidNumParallelCallsMapAndBatchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a72657de-abb7-47e8-b728-80c0d4462856 | cpp | tensorflow/tensorflow | unique_dataset_op | tensorflow/core/kernels/data/experimental/unique_dataset_op.cc | tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const UniqueDatasetOp::kDatasetType;
constexpr const char* const UniqueDatasetOp::kInputDataset;
constexpr const char* const UniqueDatasetOp::kOutputTypes;
constexpr const char* const UniqueDatasetOp::kOutputShapes;
class UniqueDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), input_(input) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, strings::StrCat(prefix, "::Unique")});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return strings::StrCat("UniqueDatasetOp::Dataset");
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const typename Iterator::Params& params)
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
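    // Pulls elements from the input iterator until one is seen whose value has not
    // been produced before (tracked in `unique_elements_`) or until the input is
    // exhausted; duplicates are dropped by retrying the loop.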
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
bool saw_new_value;
do {
saw_new_value = false;
out_tensors->clear();
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
break;
}
DCHECK_EQ(1, out_tensors->size());
saw_new_value = unique_elements_.insert((*out_tensors)[0]).second;
} while (!saw_new_value);
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeUnknownRatioNode(std::move(args));
}
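    // Checkpointing writes the input iterator state (or an "input_impl_empty"
    // marker), the number of unique elements seen so far, and each unique element
    // tensor; RestoreInternal reads them back and re-populates the set.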
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name("input_impl_empty"), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("unique_elements_size"),
unique_elements_.size()));
size_t i = 0;
for (const Tensor& t : unique_elements_) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
full_name(strings::StrCat("unique_elements[", i++, "]")), t));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (!reader->Contains(full_name("input_impl_empty"))) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t num_unique_elements;
unique_elements_.clear();
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("unique_elements_size"),
&num_unique_elements));
for (int64_t i = 0; i < num_unique_elements; ++i) {
Tensor unique_element;
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), full_name(strings::StrCat("unique_elements[", i, "]")),
&unique_element));
auto insert_result = unique_elements_.insert(unique_element);
if (!insert_result.second) {
return errors::InvalidArgument(
"Checkpoint contained two unique elements with the same "
"value.");
}
}
return absl::OkStatus();
}
private:
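    // Hash/equality functors for the unique-element set: integer tensors are hashed
    // over their raw bytes, string tensors element-by-element with Hash64Combine;
    // equality compares shape, dtype, and every element.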
struct TensorHash {
size_t operator()(const Tensor& t) const {
if (t.dtype() == DT_INT32 || t.dtype() == DT_INT64) {
return Hash64(t.tensor_data().data(), t.tensor_data().size());
} else {
DCHECK_EQ(DT_STRING, t.dtype());
auto flat_t = t.flat<tstring>();
uint64 hash = 0;
for (int64_t i = 0; i < t.NumElements(); ++i) {
hash = Hash64Combine(hash, Hash64(flat_t(i)));
}
return static_cast<size_t>(hash);
}
}
};
struct TensorKeyEqual {
bool operator()(const Tensor& lhs, const Tensor& rhs) const {
if (lhs.shape() != rhs.shape() || lhs.dtype() != rhs.dtype()) {
return false;
}
switch (lhs.dtype()) {
#define HANDLE_TYPE(T) \
case T: \
do { \
auto lhs_flat = lhs.flat<EnumToDataType<T>::Type>(); \
auto rhs_flat = rhs.flat<EnumToDataType<T>::Type>(); \
for (int64_t i = 0; i < lhs.NumElements(); ++i) { \
if (lhs_flat(i) != rhs_flat(i)) { \
return false; \
} \
} \
return true; \
} while (0)
HANDLE_TYPE(DT_INT32);
HANDLE_TYPE(DT_INT64);
HANDLE_TYPE(DT_STRING);
default:
DCHECK(false) << "UniqueDataset unhandled data type: "
<< DataTypeString(lhs.dtype());
return false;
}
}
};
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unordered_set<Tensor, TensorHash, TensorKeyEqual> unique_elements_
TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
};
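// The kernel only accepts inputs with a single DT_INT32, DT_INT64, or DT_STRING
// component; anything else is rejected with InvalidArgument.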
void UniqueDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
OP_REQUIRES(ctx, input->output_dtypes().size() == 1,
errors::InvalidArgument("UniqueDataset only supports "
"inputs with a single component."));
DataType input_dtype = input->output_dtypes()[0];
OP_REQUIRES(ctx,
input_dtype == DT_INT32 || input_dtype == DT_INT64 ||
input_dtype == DT_STRING,
errors::InvalidArgument(
"UniqueDataset only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component."));
*output = new Dataset(ctx, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("UniqueDataset").Device(DEVICE_CPU),
UniqueDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalUniqueDataset").Device(DEVICE_CPU),
UniqueDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "unique_dataset";
class UniqueDatasetParams : public DatasetParams {
public:
template <typename T>
UniqueDatasetParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
kNodeName) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(UniqueDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attributes) const override {
*attributes = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return UniqueDatasetOp::kDatasetType; }
};
class UniqueDatasetOpTest : public DatasetOpsTestBase {};
UniqueDatasetParams NormalCaseParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{12, 1},
{1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8, 34})},
"tensor_slice_dataset");
return UniqueDatasetParams(tensor_slice_dataset_params,
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams LastRecordIsDuplicateParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{11, 1},
{1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams AllRecordsTheSameParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{5, 1}, {1, 1, 1, 1, 1})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams EmptyInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0, 1}, {})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams StringParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(
TensorShape{11, 1},
{"one", "One", "two", "three", "five", "eight", "thirteen",
"twenty-one", "eight", "eight", "thirty-four"})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_STRING},
{PartialTensorShape({1})});
}
UniqueDatasetParams TwoComponentsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{
CreateTensor<int64_t>(TensorShape{1, 1}, {1}),
CreateTensor<int64_t>(TensorShape{1, 1}, {42}),
},
"tensor_slice_dataset");
return UniqueDatasetParams(
std::move(tensor_slice_dataset_params),
{DT_INT64, DT_INT64},
{PartialTensorShape({1}), PartialTensorShape({1})});
}
UniqueDatasetParams NoInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({})});
}
UniqueDatasetParams FP32Params() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<float>(TensorShape{1, 1}, {3.14})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_FLOAT},
{PartialTensorShape({1})});
}
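// Expected outputs keep the first occurrence of each value in input order; note
// that "one" and "One" are distinct strings, so both survive deduplication.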
std::vector<GetNextTestCase<UniqueDatasetParams>> GetNextTestCases() {
return {{NormalCaseParams(),
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})},
{LastRecordIsDuplicateParams(),
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}})},
{AllRecordsTheSameParams(),
CreateTensors<int64_t>(TensorShape({1}), {{1}})},
{EmptyInputParams(),
CreateTensors<int64_t>(TensorShape({1}), {})},
{StringParams(),
CreateTensors<tstring>(TensorShape({1}), {{"one"},
{"One"},
{"two"},
{"three"},
{"five"},
{"eight"},
{"thirteen"},
{"twenty-one"},
{"thirty-four"}})}};
}
ITERATOR_GET_NEXT_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
GetNextTestCases())
TEST_F(UniqueDatasetOpTest, DatasetNodeName) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(UniqueDatasetOpTest, DatasetTypeString) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(UniqueDatasetOp::kDatasetType)));
}
TEST_F(UniqueDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(UniqueDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})}));
}
std::vector<CardinalityTestCase<UniqueDatasetParams>> CardinalityTestCases() {
return {{NormalCaseParams(),
kUnknownCardinality},
{EmptyInputParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<UniqueDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{NormalCaseParams(),
{DT_INT64}},
{StringParams(),
{DT_STRING}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<UniqueDatasetParams>>
IteratorOutputShapesTestCases() {
return {{NormalCaseParams(),
{PartialTensorShape({1})}},
{StringParams(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(UniqueDatasetOpTest, IteratorPrefix) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
UniqueDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<UniqueDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{NormalCaseParams(),
{0, 2, 6, 8},
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})},
{LastRecordIsDuplicateParams(),
{0, 2, 6, 8},
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorSaveAndRestoreTestCases())
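// These parameterizations violate the op's input contract (two components, no
// components, or a float component), so Initialize() is expected to fail.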
class ParameterizedInvalidInputTest
: public UniqueDatasetOpTest,
public ::testing::WithParamInterface<UniqueDatasetParams> {};
TEST_P(ParameterizedInvalidInputTest, InvalidInput) {
auto dataset_params = GetParam();
auto result = Initialize(dataset_params);
EXPECT_FALSE(result.ok());
}
INSTANTIATE_TEST_SUITE_P(UniqueDatasetOpTest, ParameterizedInvalidInputTest,
::testing::ValuesIn({TwoComponentsParams(),
NoInputParams(), FP32Params()}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/unique_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac9dcc13-70b4-4bb8-b8e3-89f9de54e3a5 | cpp | tensorflow/tensorflow | nccl_manager | tensorflow/core/nccl/nccl_manager.cc | tensorflow/core/nccl/nccl_manager_test.cc | #include "tensorflow/core/nccl/nccl_manager.h"
#include <utility>
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
#endif
namespace tensorflow {
using stream_executor::gpu::ScopedActivateContext;
#if TENSORFLOW_USE_ROCM
#define cudaError_t hipError_t
#define cudaStream_t hipStream_t
#define cudaGetErrorString hipGetErrorString
#define cudaGetDevice hipGetDevice
#define cudaSetDevice hipSetDevice
#define cudaSuccess hipSuccess
int NcclManager::instance_count = 0;
#endif
#define NCCL_RETURN_IF_ERROR(...) \
do { \
ncclResult_t nccl_status = (__VA_ARGS__); \
if (nccl_status != ncclSuccess) { \
    return errors::Internal("NCCL: ", ncclGetErrorString(nccl_status), \
                            ". Set NCCL_DEBUG=WARN for details."); \
} \
} while (0)
#define CUDA_RETURN_IF_ERROR(...) \
do { \
cudaError_t cuda_status = (__VA_ARGS__); \
if (cuda_status != cudaSuccess) { \
return errors::Internal("CUDA: ", cudaGetErrorString(cuda_status)); \
} \
} while (0)
struct NcclManager::NcclStream : public core::RefCounted {
public:
NcclStream() = default;
~NcclStream() = default;
se::StreamExecutor* executor = nullptr;
#if TENSORFLOW_USE_ROCM
se::Stream* stream = nullptr;
#else
std::unique_ptr<se::Stream> stream;
#endif
mutex mu;
condition_variable cv;
std::deque<std::pair<Collective*, int>> pending_launches_ TF_GUARDED_BY(mu);
bool shutdown_requested TF_GUARDED_BY(mu) = false;
};
struct NcclManager::CommunicatorMember {
public:
CommunicatorMember() {}
~CommunicatorMember() {
if (nccl_comm != nullptr) ncclCommDestroy(nccl_comm);
}
ncclComm_t nccl_comm = nullptr;
NcclStream* nccl_stream = nullptr;
};
struct NcclManager::Communicator {
public:
explicit Communicator(std::vector<CommunicatorMember> members,
const string& key)
: num_devices(members.size()), members(std::move(members)), key(key) {}
const int num_devices;
std::vector<CommunicatorMember> members;
const string key;
};
namespace {
static constexpr DataTypeSet kValidDataTypes =
ToSet(DT_HALF) | ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) |
ToSet(DT_INT64);
ncclDataType_t ToNcclType(DataType t) {
switch (t) {
case DT_HALF:
return ncclHalf;
case DT_FLOAT:
return ncclFloat;
case DT_DOUBLE:
return ncclDouble;
case DT_INT32:
return ncclInt;
case DT_INT64:
return ncclInt64;
default:
return ncclFloat;
}
}
void StringToNcclUniqueId(const string& str_id, ncclUniqueId* nccl_id) {
if (str_id.size() == NCCL_UNIQUE_ID_BYTES) {
memcpy(nccl_id->internal, str_id.data(), NCCL_UNIQUE_ID_BYTES);
}
}
}
struct NcclManager::Collective : public core::RefCounted {
Collective(const string& collective_key_in, DataType data_type_in,
CollectiveType type_in, ncclRedOp_t reduction_op_in,
int num_local_devices_in, int num_global_devices_in,
const string& communicator_key_in)
: collective_key(collective_key_in),
data_type(data_type_in),
type(type_in),
reduction_op(reduction_op_in),
num_local_devices(num_local_devices_in),
num_global_devices(num_global_devices_in),
single_node(num_local_devices_in == num_global_devices_in),
communicator_key(communicator_key_in) {
participants.reserve(num_local_devices_in);
#if TENSORFLOW_USE_ROCM
if (NcclManager::instance_count > 1) {
status = errors::Internal(
"ROCm cannot use multi-node NCCL collectives on a single node");
}
#endif
}
const string collective_key;
const DataType data_type;
const CollectiveType type;
const ncclRedOp_t reduction_op;
const int num_local_devices;
const int num_global_devices;
const bool single_node;
const string communicator_key;
Communicator* communicator = nullptr;
std::vector<std::unique_ptr<Participant>> participants;
int root_rank = -1;
int available_participants = 0;
bool multi_node_ready = false;
uint64 trace_context = 0;
Status status;
};
NcclManager::NcclManager() {
VLOG(2) << "New NcclManager " << this;
#if TENSORFLOW_USE_ROCM
++instance_count;
#endif
}
NcclManager::~NcclManager() {
VLOG(2) << "~NcclManager " << this;
#if TENSORFLOW_USE_ROCM
--instance_count;
#endif
for (auto& it : device_to_comm_streams_) {
for (NcclStream* nccl_stream : it.second) {
{
mutex_lock l(nccl_stream->mu);
nccl_stream->shutdown_requested = true;
nccl_stream->cv.notify_all();
}
nccl_stream->Unref();
}
}
}
NcclManager* NcclManager::instance() {
static NcclManager* instance = new NcclManager();
#if TENSORFLOW_USE_ROCM
static absl::once_flag once;
absl::call_once(once, [] { --NcclManager::instance_count; });
#endif
return instance;
}
string NcclManager::GenerateCommunicatorKey() {
ncclUniqueId nccl_id;
ncclGetUniqueId(&nccl_id);
return string(nccl_id.internal, NCCL_UNIQUE_ID_BYTES);
}
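// Finds a cached communicator whose members match the (sorted) participants, or
// creates a new one: single-node collectives get a fresh ncclUniqueId, multi-node
// collectives reuse the id encoded in communicator_key, and ncclCommInitRank is
// called once per local device inside an NCCL group, each on its own NcclStream.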
Status NcclManager::GetCommunicator(NcclManager::Collective* collective,
NcclManager::Communicator** communicator) {
std::sort(collective->participants.begin(), collective->participants.end(),
[](const std::unique_ptr<Participant>& a,
const std::unique_ptr<Participant>& b) {
if (a->gpu_device_id != b->gpu_device_id) {
return a->gpu_device_id < b->gpu_device_id;
}
if (a->executor != b->executor) {
return a->executor < b->executor;
}
return a->global_rank < b->global_rank;
});
mutex_lock l(mu_);
if (!status_.ok()) {
return status_;
}
if (collective->communicator_key.empty()) {
for (auto& comm : communicators_) {
if (comm->num_devices == collective->num_global_devices) {
int i;
for (i = 0; i < collective->num_local_devices; ++i) {
if (comm->members[i].nccl_stream->executor !=
collective->participants[i]->executor) {
break;
}
}
if (i == collective->num_local_devices) {
*communicator = comm.get();
return OkStatus();
}
}
}
} else {
#if NCCL_MAJOR < 2
return errors::Internal(
"Cannot use multi-node NCCL collectives with NCCL 1.x");
#endif
if (collective->communicator_key.size() != NCCL_UNIQUE_ID_BYTES) {
return errors::Internal("Expected communicator_key of size ",
NCCL_UNIQUE_ID_BYTES, " but found size ",
collective->communicator_key.size());
}
for (auto& comm : communicators_) {
if (comm->key == collective->communicator_key) {
*communicator = comm.get();
return OkStatus();
}
}
}
auto* env = Env::Default();
std::set<NcclStream*> used_streams;
std::vector<CommunicatorMember> members(collective->num_local_devices);
std::vector<int> devices(collective->num_local_devices);
for (int i = 0; i < collective->num_local_devices; ++i) {
auto* executor = collective->participants[i]->executor;
auto& streams = device_to_comm_streams_[executor];
NcclStream* nccl_stream = nullptr;
for (const auto& s : streams) {
if (used_streams.insert(s).second) {
nccl_stream = s;
break;
}
}
if (nccl_stream == nullptr) {
nccl_stream = new NcclStream();
nccl_stream->executor = executor;
#if TENSORFLOW_USE_ROCM
nccl_stream->stream = collective->participants[i]->context->nccl_stream();
#else
TF_ASSIGN_OR_RETURN(auto stream, executor->CreateStream());
nccl_stream->stream = std::move(stream);
#endif
streams.emplace_back(nccl_stream);
used_streams.insert(nccl_stream);
nccl_stream->Ref();
env->SchedClosure([this, nccl_stream]() {
LoopKernelLaunches(nccl_stream);
nccl_stream->Unref();
});
}
members[i].nccl_stream = nccl_stream;
devices[i] = collective->participants[i]->gpu_device_id;
}
std::vector<ncclComm_t> nccl_comms(collective->num_local_devices);
VLOG(2) << "Created nccl Communicator with "
<< "num_global_devices = " << collective->num_global_devices
<< " num_local_devices = " << collective->num_local_devices
<< " communicator_key ="
<< absl::StrJoin(
std::vector<int>{collective->communicator_key.begin(),
collective->communicator_key.end()},
" ");
#if NCCL_MAJOR >= 2
ncclUniqueId nccl_id;
if (collective->single_node) {
NCCL_RETURN_IF_ERROR(ncclGetUniqueId(&nccl_id));
} else {
StringToNcclUniqueId(collective->communicator_key, &nccl_id);
}
int saved_device = 0;
CUDA_RETURN_IF_ERROR(cudaGetDevice(&saved_device));
NCCL_RETURN_IF_ERROR(ncclGroupStart());
for (int i = 0; i < collective->num_local_devices; ++i) {
const int rank = collective->participants[i]->global_rank >= 0
? collective->participants[i]->global_rank
: i;
CUDA_RETURN_IF_ERROR(cudaSetDevice(devices[i]));
NCCL_RETURN_IF_ERROR(ncclCommInitRank(
nccl_comms.data() + i, collective->num_global_devices, nccl_id, rank));
}
NCCL_RETURN_IF_ERROR(ncclGroupEnd());
CUDA_RETURN_IF_ERROR(cudaSetDevice(saved_device));
#else
NCCL_RETURN_IF_ERROR(ncclCommInitAll(
nccl_comms.data(), collective->num_local_devices, devices.data()));
#endif
for (int i = 0; i < collective->num_local_devices; ++i) {
members[i].nccl_comm = nccl_comms[i];
}
communicators_.emplace_back(
new Communicator(std::move(members), collective->communicator_key));
*communicator = communicators_.back().get();
return OkStatus();
}
void NcclManager::AddToAllReduce(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kAllReduce, reduction_op);
}
void NcclManager::AddToAllGather(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllGather,
ncclSum );
}
void NcclManager::AddToReduceScatter(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduceScatter, reduction_op);
}
void NcclManager::AddToAllToAll(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllToAll,
ncclSum );
}
void NcclManager::AddBroadcastSend(std::unique_ptr<Participant> participant,
const Context& context) {
participant->root = true;
AddParticipant(std::move(participant), context, kBroadcast,
ncclSum );
}
void NcclManager::AddBroadcastRecv(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kBroadcast,
ncclSum );
}
void NcclManager::AddReduceSend(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::AddReduceRecv(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
participant->root = true;
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::SignalMultiNodeReady(const string& collective_key) {
Collective* to_run = nullptr;
{
mutex_lock l(mu_);
auto collective_it = collectives_.find(collective_key);
if (collective_it != collectives_.end()) {
Collective* collective = collective_it->second;
collective->multi_node_ready = true;
if (CheckReady(collective_key, collective)) {
to_run = collective;
}
VLOG(2) << "SignalMultiNodeReady collective " << collective_key
<< " to_run " << to_run;
}
}
if (to_run != nullptr) RunCollective(to_run);
}
void NcclManager::AddParticipant(std::unique_ptr<Participant> participant,
const Context& context,
CollectiveType collective_type,
ncclRedOp_t reduction_op) {
Collective* to_run = nullptr;
DataType data_type;
Status nccl_manager_status;
if (participant->input != nullptr) {
data_type = participant->input->dtype();
} else {
data_type = participant->output->dtype();
}
{
mutex_lock l(mu_);
nccl_manager_status = status_;
if (nccl_manager_status.ok()) {
auto collective_it = collectives_.find(context.collective_key);
Collective* collective = nullptr;
if (collective_it == collectives_.end()) {
collective = new Collective(
context.collective_key, data_type, collective_type, reduction_op,
context.num_local_devices, context.num_global_devices,
context.communicator_key);
collectives_.emplace(context.collective_key, collective);
} else {
collective = collective_it->second;
}
if (collective->status.ok() && !collective->single_node &&
collective->communicator_key.empty()) {
        collective->status = errors::Internal(
            "Collective ", reduction_op,
            " is multi-node with num_local_devices=",
collective->num_local_devices,
" and num_global_devices=", collective->num_global_devices,
" but has an empty communicator_key");
}
if (collective->status.ok() && collective->communicator_key.size() !=
context.communicator_key.size()) {
collective->status =
errors::Internal("Collective ", reduction_op,
" mismatch in member communicator_key with size ",
collective->communicator_key.size(),
" and arg communicator_key with size ",
context.communicator_key.size());
}
if (collective->status.ok() && collective->type != collective_type) {
collective->status = errors::Internal(
"Collective ", reduction_op, " previously initialized with type ",
collective->type, " but now got type ", collective_type);
}
if (collective->status.ok() &&
collective->num_global_devices != context.num_global_devices) {
collective->status =
errors::Internal("Collective ", reduction_op,
" previously initialized with num_global_devices ",
collective->num_global_devices, " but now got ",
context.num_global_devices);
}
if (collective->status.ok() &&
collective->num_local_devices != context.num_local_devices) {
collective->status =
            errors::Internal("Collective ", reduction_op,
                             " previously initialized with num_local_devices ",
collective->num_local_devices, " but now got ",
context.num_local_devices);
}
if (collective->status.ok() &&
collective->participants.size() >= collective->num_local_devices) {
collective->status = errors::Internal(
"Collective ", reduction_op, " expected ",
collective->num_local_devices, " participants but now has ",
collective->participants.size(),
" with one more participant being added");
}
if (collective->status.ok() && collective->root_rank >= 0 &&
context.source_rank >= 0 &&
collective->root_rank != context.source_rank) {
collective->status = errors::Internal(
"Collective ", collective->collective_key,
" already has root_rank ", collective->root_rank,
" but new participant has root_rank ", context.source_rank);
}
if (collective->status.ok() &&
!kValidDataTypes.Contains(collective->data_type)) {
collective->status = errors::Internal(
"Collective ", collective->collective_key,
" expected data types compatible with NCCL but instead got ",
DataTypeString(collective->data_type));
}
if (context.source_rank >= 0) {
collective->root_rank = context.source_rank;
}
collective->participants.emplace_back(std::move(participant));
++collective->available_participants;
if (CheckReady(context.collective_key, collective)) {
to_run = collective;
}
}
}
if (!nccl_manager_status.ok()) {
participant->done_callback(nccl_manager_status);
return;
}
if (to_run != nullptr) RunCollective(to_run);
}
bool NcclManager::CheckReady(const string& collective_key,
Collective* collective) {
if (collective->available_participants == collective->num_local_devices) {
if (collective->num_global_devices == collective->num_local_devices ||
collective->multi_node_ready) {
collectives_.erase(collective_key);
return true;
}
}
return false;
}
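// Resolves the communicator, makes each member's NCCL stream wait on the producing
// tensor stream, validates the root rank, then enqueues one launch per local device
// onto that stream's pending_launches_ queue, where the per-stream
// LoopKernelLaunches thread picks it up.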
void NcclManager::RunCollective(Collective* collective) {
tensorflow::profiler::TraceMeProducer traceme("Schedule Collective");
collective->trace_context = traceme.GetContextId();
static mutex collective_mu(LINKER_INITIALIZED);
Status status = collective->status;
if (status.ok()) {
status = GetCommunicator(collective, &collective->communicator);
}
for (int i = 0; status.ok() && i < collective->num_local_devices; ++i) {
Participant* p = collective->participants[i].get();
NcclStream* nccl_stream = collective->communicator->members[i].nccl_stream;
CHECK(nccl_stream != nullptr);
const int rank = p->global_rank >= 0 ? p->global_rank : i;
if (p->input != nullptr) {
status = nccl_stream->stream->WaitFor(p->tensor_stream);
}
if (p->root) {
if (collective->root_rank == -1) {
collective->root_rank = rank;
} else if (collective->root_rank != rank) {
status = errors::Internal(
"Inconsistent root rank ", collective->root_rank, " and GPU id ",
p->gpu_device_id, " rank ", rank, " also marked as root.");
}
}
VLOG(2) << "RunCollective rank " << rank << " global_rank "
<< p->global_rank << " root_rank " << collective->root_rank;
}
if (status.ok() && collective->type == kBroadcast &&
collective->root_rank < 0) {
status = errors::Internal("Root rank not indicated for collective ",
collective->collective_key);
}
if (!status.ok()) {
for (int i = 0; i < collective->num_local_devices; ++i) {
collective->participants[i]->done_callback(status);
}
collective->Unref();
return;
}
{
mutex_lock l(collective_mu);
for (int i = 0; i < collective->num_local_devices; ++i) {
NcclStream* nccl_stream =
collective->communicator->members[i].nccl_stream;
mutex_lock l(nccl_stream->mu);
nccl_stream->pending_launches_.push_front(std::make_pair(collective, i));
collective->Ref();
nccl_stream->cv.notify_all();
}
}
collective->Unref();
}
namespace {
size_t ComputeBufferSize(const NcclManager::Participant* p,
DataType data_type) {
size_t num_elements = 0;
if (p->output) {
num_elements += p->output->NumElements();
} else if (p->input) {
num_elements += p->input->NumElements();
}
return num_elements * DataTypeSize(data_type);
}
}
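// Per-NcclStream worker loop: blocks on the condition variable until a launch is
// pending (or shutdown is requested), issues the corresponding NCCL call on the
// communication stream, and runs the participant's done_callback via the EventMgr
// once the stream reaches that point.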
void NcclManager::LoopKernelLaunches(NcclStream* nccl_stream) {
#if TENSORFLOW_USE_ROCM
se::Stream* comm_stream = nccl_stream->stream;
#else
se::Stream* comm_stream = nccl_stream->stream.get();
#endif
ScopedActivateContext scoped_context(nccl_stream->executor);
cudaStream_t cu_stream = reinterpret_cast<cudaStream_t>(
comm_stream->platform_specific_handle().stream);
while (true) {
std::pair<Collective*, int> next_launch;
{
VLOG(3) << "Locking mutex nccl_stream " << nccl_stream;
mutex_lock l(nccl_stream->mu);
while (nccl_stream->pending_launches_.empty()) {
if (nccl_stream->shutdown_requested) {
return;
}
nccl_stream->cv.wait(l);
}
next_launch = nccl_stream->pending_launches_.back();
nccl_stream->pending_launches_.pop_back();
}
Collective* collective = next_launch.first;
tensorflow::profiler::TraceMeConsumer traceme("Run Collective",
collective->trace_context);
ncclDataType_t data_type = ToNcclType(collective->data_type);
int p_idx = next_launch.second;
Participant* p = collective->participants[p_idx].get();
auto nccl_comm = collective->communicator->members[p_idx].nccl_comm;
ncclResult_t nccl_result = ncclSuccess;
switch (collective->type) {
case kAllReduce: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclAllReduce collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllReduce",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_reduce"}});
});
nccl_result = ncclAllReduce(sendbuff, recvbuff, p->input->NumElements(),
data_type, collective->reduction_op,
nccl_comm, cu_stream);
break;
}
case kBroadcast: {
const void* sendbuff = nullptr;
void* recvbuff = nullptr;
int num_elements = -1;
if (p->input) {
sendbuff = p->input->tensor_data().data();
num_elements = p->input->NumElements();
}
if (p->output) {
recvbuff = const_cast<char*>(p->output->tensor_data().data());
num_elements = p->output->NumElements();
} else {
recvbuff = const_cast<void*>(sendbuff);
}
if (num_elements < 0) {
p->done_callback(errors::Internal(
"Both input and output are null in ncclBroadcast"));
collective->Unref();
continue;
}
VLOG(2) << "call NcclBroadcast collective_key "
<< collective->collective_key << " participant " << p_idx
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclBroadcast",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "broadcast"}});
});
nccl_result =
ncclBroadcast(sendbuff, recvbuff, num_elements, data_type,
collective->root_rank, nccl_comm, cu_stream);
break;
}
case kReduce: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff =
p->output ? const_cast<char*>(p->output->tensor_data().data())
: nullptr;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"buffer_size",
{{"output_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "reduce"}});
});
nccl_result = ncclReduce(sendbuff, recvbuff, p->input->NumElements(),
data_type, collective->reduction_op,
collective->root_rank, nccl_comm, cu_stream);
break;
}
case kAllGather: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclAllGather collective_key "
<< collective->collective_key << " participant " << p_idx
<< " sendbuff " << sendbuff << " sendcount "
<< p->input->NumElements() << " recvbuff " << recvbuff
<< " recvcount " << p->output->NumElements() << " nccl_comm "
<< nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllGather",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_gather"}});
});
nccl_result = ncclAllGather(sendbuff, recvbuff, p->input->NumElements(),
data_type, nccl_comm, cu_stream);
break;
}
case kReduceScatter: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclReduceScatter collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclReduceScatter",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "reduce_scatter"}});
});
nccl_result = ncclReduceScatter(
sendbuff, recvbuff, p->output->NumElements(), data_type,
collective->reduction_op, nccl_comm, cu_stream);
break;
}
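      // All-to-all is implemented with paired ncclSend/ncclRecv calls inside one
      // NCCL group: the input is split into equal slices of `count` elements, and
      // sendbuff + i * rank_offset is sent to (and recvbuff + i * rank_offset
      // received from) the i-th participant's global rank.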
case kAllToAll: {
const char* sendbuff = p->input->tensor_data().data();
char* recvbuff = const_cast<char*>(p->output->tensor_data().data());
size_t count =
p->input->NumElements() / collective->participants.size();
size_t rank_offset = count * DataTypeSize(collective->data_type);
VLOG(2) << "call Nccl All to All collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << static_cast<const void*>(sendbuff)
<< " recvbuff " << static_cast<void*>(recvbuff) << " nccl_comm "
<< nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllToAll",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_to_all"}});
});
ncclGroupStart();
for (int i = 0; i < collective->participants.size(); ++i) {
ncclSend(sendbuff + i * rank_offset, count, data_type,
collective->participants[i]->global_rank, nccl_comm,
cu_stream);
ncclRecv(recvbuff + i * rank_offset, count, data_type,
collective->participants[i]->global_rank, nccl_comm,
cu_stream);
}
nccl_result = ncclGroupEnd();
break;
}
}
auto done_callback = [collective, p_idx, nccl_result]() {
VLOG(2) << "done Nccl kernel collective_key "
<< collective->collective_key << " participant " << p_idx
<< " ncclResult " << nccl_result;
if (nccl_result == ncclSuccess) {
collective->participants[p_idx]->done_callback(OkStatus());
} else {
collective->participants[p_idx]->done_callback(errors::Unknown(
"Error invoking NCCL: ", ncclGetErrorString(nccl_result)));
}
collective->Unref();
};
p->event_mgr->ThenExecute(comm_stream, done_callback);
}
}
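// Aborts everything in flight: fails the done_callback of every pending collective
// with `s`, then calls ncclCommAbort on every communicator member in parallel on an
// unbounded work queue and waits for all of them to finish.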
void NcclManager::StartAbort(const Status& s) {
absl::flat_hash_map<string, Collective*> collectives;
std::vector<std::unique_ptr<Communicator>> communicators;
{
mutex_lock l(mu_);
if (!status_.ok()) {
LOG(WARNING)
<< "NcclManager already aborted, ignoring subsequent StartAbort with "
<< s;
return;
}
status_ = s;
collectives.swap(collectives_);
communicators.swap(communicators_);
}
VLOG(2) << "Aborted NcclManager " << this << " with " << collectives.size()
<< " collectives and " << communicators.size()
<< " comms with status " << s;
for (const auto& item : collectives) {
for (const std::unique_ptr<Participant>& p : item.second->participants) {
p->done_callback(s);
}
item.second->Unref();
}
UnboundedWorkQueue queue(Env::Default(), "nccl_abort");
int num_comms = 0;
for (std::unique_ptr<Communicator>& communicator : communicators) {
num_comms += communicator->members.size();
}
BlockingCounter pending(num_comms);
for (std::unique_ptr<Communicator>& communicator : communicators) {
for (CommunicatorMember& member : communicator->members) {
queue.Schedule([&member, &pending]() {
ncclCommAbort(member.nccl_comm);
member.nccl_comm = nullptr;
pending.DecrementCount();
});
}
}
pending.Wait();
}
void NcclManager::Reset() {
mutex_lock l(mu_);
status_ = Status();
VLOG(2) << "Reset NcclManager " << this;
}
}
#endif | #include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include <algorithm>
#include <random>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/nccl_manager.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
static std::vector<std::unique_ptr<BaseGPUDevice>> GetGPUDevices() {
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(SessionOptions(), "", &devices));
std::vector<std::unique_ptr<BaseGPUDevice>> gpus;
for (std::unique_ptr<Device>& device : devices) {
if (device->device_type() == "GPU") {
gpus.emplace_back(static_cast<BaseGPUDevice*>(device.release()));
}
}
return gpus;
}
template <typename Scalar>
class NcclManagerTest : public ::testing::Test {
public:
struct TestCase {
TestCase(int num_nodes, int num_ranks_per_node)
: num_nodes(num_nodes), num_ranks_per_node(num_ranks_per_node) {}
std::vector<Tensor> ins;
std::vector<Tensor> outs;
Tensor expected;
const int num_nodes;
const int num_ranks_per_node;
mutex mu;
Status final_status;
int num_completed TF_GUARDED_BY(mu) = 0;
condition_variable done_cv;
};
static void SetUpTestSuite() {
setenv("NCCL_DEBUG", "INFO", 1 );
setenv("NCCL_LAUNCH_MODE", "PARALLEL", 1 );
devices_ = new std::vector<std::unique_ptr<BaseGPUDevice>>(GetGPUDevices());
VLOG(1) << "Running test with " << devices_->size() << " gpus";
if (devices_->size() <= 1) {
LOG(FATAL) << "Cannot run NCCL test without multiple GPUs";
}
work_queue_ = new UnboundedWorkQueue(Env::Default(), "nccl_manager_test");
}
void SetUp() override {
ASSERT_GT(devices_->size(), 0) << "No GPUs found";
ASSERT_NE(work_queue_, nullptr);
}
static int32 NumGPUs() { return static_cast<int32>(devices_->size()); }
static void PopulateMultiNodeParams(int* num_nodes, int* num_ranks_per_node) {
const auto num_gpus = NumGPUs();
CHECK_GT(num_gpus, 1);
*num_nodes = 2;
if (num_gpus % 2 == 0) {
*num_ranks_per_node = num_gpus / 2;
} else {
*num_ranks_per_node = (num_gpus - 1) / 2;
}
}
static void TearDownTestSuite() {
delete devices_;
delete work_queue_;
}
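  // Builds per-rank input tensors on each GPU and computes the expected result on
  // the host by folding every input into `expected` with the requested reduction
  // (sum, prod, max, or min), starting from that reduction's identity value;
  // value_scale grows by 10x per rank so each rank contributes distinct values.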
TestCase* MakeReductionTestCase(int num_nodes, int num_ranks_per_node,
ncclRedOp_t reduction_op, TensorShape shape,
float value_offset) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
if (reduction_op == ncclProd) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
} else if (reduction_op == ncclSum) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
} else if (reduction_op == ncclMax) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return -max_; });
} else if (reduction_op == ncclMin) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return max_; });
} else {
LOG(FATAL) << "Invalid reduction_op " << reduction_op;
}
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale + value_offset);
});
for (int j = 0; j < shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
if (reduction_op == ncclProd) {
out_expr(j) = out_expr(j) * in_val;
} else if (reduction_op == ncclSum) {
out_expr(j) = out_expr(j) + in_val;
} else if (reduction_op == ncclMax) {
if (in_val > out_expr(j)) {
out_expr(j) = in_val;
}
} else if (reduction_op == ncclMin) {
if (in_val < out_expr(j)) {
out_expr(j) = in_val;
}
}
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
TestCase* MakeGatherTestCase(int num_nodes, int num_ranks_per_node,
TensorShape in_shape, TensorShape out_shape) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, out_shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int i = 0; i < num_ranks_per_node; ++i) {
auto* device = GetDevice(num_ranks_per_node, node, i);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, in_shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale);
});
int32_t gather_idx =
(node * num_ranks_per_node + i) * in_shape.num_elements();
for (int j = 0; j < in_shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
out_expr(gather_idx + j) = in_val;
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, in_shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
out_shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
TestCase* MakeBroadcastTestCase(int num_nodes, int num_ranks_per_node,
TensorShape shape, int src_node, int src_rank,
bool in_place) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
if (node == src_node && local_rank == src_rank) {
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
if (in_place) {
test_case->outs.emplace_back(test_case->ins.back());
} else {
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
shape);
}
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu,
[](int) { return static_cast<Scalar>(1); });
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
} else {
test_case->ins.emplace_back(Tensor());
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
}
}
}
return test_case;
}
void WaitForTestCompletion(TestCase* test_case) {
mutex_lock l(test_case->mu);
while (test_case->num_completed != test_case->outs.size()) {
test_case->done_cv.wait(l);
}
}
void VerifyResults(TestCase* test_case) {
WaitForTestCompletion(test_case);
TF_ASSERT_OK(test_case->final_status);
for (int node = 0; node < test_case->num_nodes; ++node) {
for (int local_rank = 0; local_rank < test_case->num_ranks_per_node;
++local_rank) {
auto* device =
GetDevice(test_case->num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(test_case->num_ranks_per_node, node, local_rank);
const Tensor& out_gpu = test_case->outs[global_rank];
Tensor out_cpu(data_type_, out_gpu.shape());
auto out_gpu_mem = AsDeviceMemory(out_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(out_cpu.flat<Scalar>().data(), out_gpu_mem,
out_cpu.TotalBytes()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
VLOG(1) << "Verifying rank " << global_rank << " expected shape "
<< test_case->expected.shape() << " out shape "
<< out_cpu.shape();
test::ExpectClose(test_case->expected, out_cpu);
}
}
}
void VerifyError(TestCase* test_case) {
WaitForTestCompletion(test_case);
LOG(INFO) << test_case->final_status;
EXPECT_EQ(test_case->final_status.code(), error::INTERNAL);
}
NcclManager::DoneCallback CreateDoneCallback(TestCase* test_case) {
return [this, test_case](Status s) {
mutex_lock l(test_case->mu);
test_case->final_status.Update(s);
if (++test_case->num_completed == test_case->outs.size()) {
test_case->done_cv.notify_one();
}
};
}
struct NodeState {
NcclManager nccl_manager;
std::atomic<int> launched{0};
};
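  // Emulates a multi-node all-reduce on one machine: each "node" gets its own
  // NcclManager, all of them share a communicator_key generated by node 0's
  // manager, and every node signals readiness via SignalMultiNodeReady once its
  // local participants have been added.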
void RunMultiNodeAllReduceTest(const int num_nodes,
const int num_ranks_per_node) {
std::vector<NodeState> node_states(num_nodes);
RunMultiNodeAllReduceTest(node_states, num_ranks_per_node);
}
void RunMultiNodeAllReduceTest(std::vector<NodeState>& node_states,
const int num_ranks_per_node) {
const int num_nodes = node_states.size();
const int num_global_ranks = num_nodes * num_ranks_per_node;
const string collective_key = "allreduce";
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<TestCase> test_case(
this->MakeReductionTestCase(num_nodes, num_ranks_per_node,
reduction_op, TensorShape({2, 3}), 0.0f));
for (int node = 0; node < num_nodes; ++node) {
auto node_fn = [this, node, num_ranks_per_node, num_global_ranks,
&node_states, &communicator_key, &collective_key,
reduction_op, &test_case] {
for (int local_rank = 0; local_rank < num_ranks_per_node;
++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[global_rank],
&test_case->outs[global_rank], global_rank,
this->CreateDoneCallback(test_case.get()));
node_states[node].nccl_manager.AddToAllReduce(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, -1},
reduction_op);
VLOG(1) << "AddToAllReduce node " << node << " global_rank "
<< global_rank;
}
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
};
this->work_queue_->Schedule(node_fn);
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
void RunMultiNodeBroadcastTest(const int num_nodes,
const int num_ranks_per_node,
const int src_node, const int src_local_rank,
const bool in_place) {
const int num_global_ranks = num_nodes * num_ranks_per_node;
const int src_global_rank = src_node * num_ranks_per_node + src_local_rank;
const string collective_key = "broadcast";
std::vector<NodeState> node_states(num_nodes);
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
std::unique_ptr<TestCase> test_case(this->MakeBroadcastTestCase(
num_nodes, num_ranks_per_node, TensorShape({5, 6}), src_node,
src_local_rank, in_place));
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto rank_fn = [this, node, num_ranks_per_node, num_global_ranks,
src_global_rank, local_rank, &node_states,
&collective_key, &communicator_key, &test_case]() {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto* input = global_rank == src_global_rank
? &test_case->ins[global_rank]
: nullptr;
auto* output = test_case->outs[global_rank].NumElements() == 0
? nullptr
: &test_case->outs[global_rank];
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, input, output, global_rank,
this->CreateDoneCallback(test_case.get()));
if (global_rank == src_global_rank) {
node_states[node].nccl_manager.AddBroadcastSend(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
} else {
node_states[node].nccl_manager.AddBroadcastRecv(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
}
if (++node_states[node].launched == num_ranks_per_node) {
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
}
};
this->work_queue_->Schedule(std::move(rank_fn));
}
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
static int GlobalRank(int num_ranks_per_node, int node, int local_rank) {
return node * num_ranks_per_node + local_rank;
}
static BaseGPUDevice* GetDevice(int num_ranks_per_node, int node,
int local_rank) {
const int device_idx = GlobalRank(num_ranks_per_node, node, local_rank);
CHECK_LT(device_idx, devices_->size());
return (*devices_)[device_idx].get();
}
static UnboundedWorkQueue* work_queue_;
private:
static Allocator* GpuAllocator(BaseGPUDevice* device) {
return device->GetAllocator(AllocatorAttributes());
}
static se::DeviceMemory<Scalar> AsDeviceMemory(const Scalar* cuda_memory) {
se::DeviceMemoryBase wrapped(const_cast<Scalar*>(cuda_memory));
se::DeviceMemory<Scalar> typed(wrapped);
return typed;
}
static std::vector<std::unique_ptr<BaseGPUDevice>>* devices_;
static const DataType data_type_;
static const Scalar max_;
};
template <typename Scalar>
std::vector<std::unique_ptr<BaseGPUDevice>>* NcclManagerTest<Scalar>::devices_ =
nullptr;
template <typename Scalar>
const DataType NcclManagerTest<Scalar>::data_type_ =
DataTypeToEnum<Scalar>::value;
template <typename Scalar>
const Scalar NcclManagerTest<Scalar>::max_ =
Eigen::NumTraits<Scalar>::highest();
template <typename Scalar>
UnboundedWorkQueue* NcclManagerTest<Scalar>::work_queue_ = nullptr;
using TypeList = ::testing::Types<float, double>;
TYPED_TEST_SUITE(NcclManagerTest, TypeList);
TYPED_TEST(NcclManagerTest, BasicSumReduction) {
const int num_ranks = this->NumGPUs();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
{"allreduce", num_ranks,
num_ranks, "",
-1},
reduction_op);
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, MultipleCallers) {
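  // Stress test: repeatedly schedules batches of concurrent all-reduces in a
  // randomized order until the one-second time limit expires.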
const int num_ranks = this->NumGPUs();
const int num_collectives_per_iteration = 10;
const int time_limit_micros = 1 * 1000 * 1000;
int64_t start = Env::Default()->NowMicros();
srand(Env::Default()->NowMicros());
for (;;) {
std::vector<std::pair<int, int>> case_and_rank;
std::vector<std::unique_ptr<typename TestFixture::TestCase>> test_cases;
for (int i = 0; i < num_collectives_per_iteration; ++i) {
test_cases.emplace_back(this->MakeReductionTestCase(
1, num_ranks, ncclSum,
TensorShape({100, i % 5 + 1, i % 3 + 1}), 1.1f * i));
for (int j = 0; j < num_ranks; ++j) {
case_and_rank.emplace_back(i, j);
}
}
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_ASSERT_OK(stream->BlockHostUntilDone());
}
std::shuffle(case_and_rank.begin(), case_and_rank.end(),
std::mt19937(std::random_device()()));
mutex mu;
const int to_schedule = case_and_rank.size();
for (int i = 0; i < to_schedule; ++i) {
auto fn = [&]() {
int rank;
int test_num;
{
mutex_lock l(mu);
test_num = case_and_rank.back().first;
rank = case_and_rank.back().second;
case_and_rank.pop_back();
}
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
typename TestFixture::TestCase* test_case = test_cases[test_num].get();
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
{strings::StrCat("allreduce", test_num),
num_ranks,
num_ranks,
"", -1},
ncclSum);
};
this->work_queue_->Schedule(fn);
}
VLOG(2) << "Verifying results for " << num_collectives_per_iteration
<< " collectives";
for (int i = 0; i < test_cases.size(); ++i) {
this->VerifyResults(test_cases[i].get());
}
int64_t delta = Env::Default()->NowMicros() - start;
if (delta > time_limit_micros) {
LOG(INFO) << "Ran for " << delta << " microsecs, now quitting";
break;
}
}
}
TYPED_TEST(NcclManagerTest, BasicAllGather) {
const int num_ranks = this->NumGPUs();
for (int i = 0; i < num_ranks; ++i) {
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeGatherTestCase(1, num_ranks,
TensorShape({2, 3}),
TensorShape({2 * num_ranks, 3})));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllGather(
std::move(participant),
{"allgather", num_ranks,
num_ranks, "",
-1});
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, BasicBroadcast) {
this->RunMultiNodeBroadcastTest(1,
this->NumGPUs(),
0, 0,
false);
}
TYPED_TEST(NcclManagerTest, InPlaceBroadcast) {
this->RunMultiNodeBroadcastTest(1,
this->NumGPUs(),
0, 0,
true);
}
TYPED_TEST(NcclManagerTest, BroadcastWithDifferentRanks) {
for (int num_ranks = 1; num_ranks <= this->NumGPUs(); ++num_ranks) {
const int src_rank = static_cast<int>(random::New64() % num_ranks);
for (int in_place_idx = 0; in_place_idx <= 1; ++in_place_idx) {
const bool in_place = in_place_idx == 0;
this->RunMultiNodeBroadcastTest(1, num_ranks,
0, src_rank, in_place);
}
}
}
TEST(NcclManagerTest, CommunicatorKey) {
const string communicator_key =
NcclManager::instance()->GenerateCommunicatorKey();
EXPECT_EQ(communicator_key.size(), NCCL_UNIQUE_ID_BYTES);
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNode) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeAllReduceTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
this->RunMultiNodeAllReduceTest(num_nodes, num_ranks_per_node);
}
#endif
TYPED_TEST(NcclManagerTest, MultiNodeSingle) {
this->RunMultiNodeAllReduceTest(1,
this->NumGPUs());
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNodeBroadcast) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeBroadcastTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
this->RunMultiNodeBroadcastTest(num_nodes, num_ranks_per_node,
0, 0,
true);
}
#endif
TYPED_TEST(NcclManagerTest, ConsistentCollectiveType) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
if (rank == 0) {
NcclManager::instance()->AddToAllReduce(std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
"",
-1},
ncclSum);
} else {
NcclManager::instance()->AddBroadcastSend(
std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
"", -1});
}
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentCommunicatorKey) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
rank == 0 ? "" : NcclManager::instance()->GenerateCommunicatorKey(),
-1},
ncclSum);
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentNumberOfDevices) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
int num_devices = rank == 0 ? num_ranks : num_ranks + 1;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(std::move(participant),
{"bad_coll_type",
num_devices,
num_devices,
"",
-1},
ncclSum);
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastNoSource) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeBroadcastTestCase(1, num_ranks,
TensorShape({2, 3}), -1,
-1, false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, nullptr, &test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastRecv(std::move(participant),
{"bcast_no_send",
num_ranks,
num_ranks,
"",
-1});
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastMultipleSends) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeBroadcastTestCase(1, num_ranks,
TensorShape({2, 3}), -1,
-1, false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->outs[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastSend(std::move(participant),
{"bcast_multiple_send",
num_ranks,
num_ranks,
"",
-1});
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastInconsistentSource) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeBroadcastTestCase(1, num_ranks,
TensorShape({2, 3}), -1,
-1, false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->outs[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastRecv(std::move(participant),
{"bcast_inconsistent_source",
num_ranks,
num_ranks,
"",
rank});
}
this->VerifyError(test_case.get());
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, AbortThenReset) {
using NodeState = typename TestFixture::NodeState;
using TestCase = typename TestFixture::TestCase;
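  // First runs successful all-reduces across both nodes, then starts a
  // collective that only node 0 joins, aborts every manager, waits for the
  // pending participant to finish with an error, resets the managers with a
  // fresh communicator key, and finally verifies a new collective succeeds.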
const int num_nodes = 2;
std::vector<NodeState> nodes(num_nodes);
this->RunMultiNodeAllReduceTest(nodes, 1);
const string collective_key = "allreduce";
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(0);
auto node_fn = [&](TestCase* test_case, int node,
const string& communicator_key) {
auto* device = this->GetDevice( 1, node,
0);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[node],
&test_case->outs[node], node,
this->CreateDoneCallback(test_case));
nodes[node].nccl_manager.AddToAllReduce(
std::move(participant),
{collective_key, 1,
num_nodes, communicator_key,
-1},
reduction_op);
nodes[node].nccl_manager.SignalMultiNodeReady(collective_key);
};
string communicator_key = nodes[0].nccl_manager.GenerateCommunicatorKey();
{
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
num_nodes, 1, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int i = 0; i < num_nodes; ++i) {
this->work_queue_->Schedule(
[&node_fn, &test_case, i, communicator_key]() {
node_fn(test_case.get(), i, communicator_key);
});
}
this->VerifyResults(test_case.get());
}
ASSERT_GT(num_nodes, 1);
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
num_nodes, 1, reduction_op,
TensorShape({2, 3}), 0.0f));
node_fn(test_case.get(), 0, communicator_key);
Env::Default()->SleepForMicroseconds(1000000);
for (auto& node : nodes) {
node.nccl_manager.StartAbort(errors::Unavailable("peer down"));
}
{
mutex_lock l(test_case->mu);
while (test_case->num_completed != 1) {
test_case->done_cv.wait(l);
}
}
for (auto& node : nodes) {
node.nccl_manager.Reset();
}
communicator_key = nodes[0].nccl_manager.GenerateCommunicatorKey();
{
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
num_nodes, 1, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int i = 0; i < num_nodes; ++i) {
this->work_queue_->Schedule(
[&node_fn, &test_case, i, communicator_key]() {
node_fn(test_case.get(), i, communicator_key);
});
}
this->VerifyResults(test_case.get());
}
}
#endif
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/nccl/nccl_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/nccl/nccl_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c39691e-fb22-41a1-8231-568386ff230a | cpp | tensorflow/tensorflow | debug_events_writer | tensorflow/core/util/debug_events_writer.cc | tensorflow/core/util/debug_events_writer_test.cc | #include "tensorflow/core/util/debug_events_writer.h"
#include <deque>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace tfdbg {
namespace {
void MaybeSetDebugEventTimestamp(DebugEvent* debug_event, Env* env) {
if (debug_event->wall_time() == 0) {
debug_event->set_wall_time(env->NowMicros() / 1e6);
}
}
}
SingleDebugEventFileWriter::SingleDebugEventFileWriter(const string& file_path)
: env_(Env::Default()),
file_path_(file_path),
num_outstanding_events_(0),
writer_mu_() {}
Status SingleDebugEventFileWriter::Init() {
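  // Idempotent: returns early if the record writer already exists, otherwise
  // opens the file for writing and wraps it in a TFRecord writer.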
if (record_writer_ != nullptr) {
return absl::OkStatus();
}
record_writer_.reset();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env_->NewWritableFile(file_path_, &writable_file_),
"Creating writable file ", file_path_);
record_writer_ = std::make_unique<io::RecordWriter>(writable_file_.get());
if (record_writer_ == nullptr) {
return errors::Unknown("Could not create record writer at path: ",
file_path_);
}
num_outstanding_events_.store(0);
VLOG(1) << "Successfully opened debug events file: " << file_path_;
return absl::OkStatus();
}
void SingleDebugEventFileWriter::WriteSerializedDebugEvent(
StringPiece debug_event_str) {
if (record_writer_ == nullptr) {
if (!Init().ok()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
}
}
num_outstanding_events_.fetch_add(1);
{
mutex_lock l(writer_mu_);
record_writer_->WriteRecord(debug_event_str).IgnoreError();
}
}
Status SingleDebugEventFileWriter::Flush() {
const int num_outstanding = num_outstanding_events_.load();
if (num_outstanding == 0) {
return absl::OkStatus();
}
if (writable_file_ == nullptr) {
return errors::Unknown("Unexpected NULL file for path: ", file_path_);
}
{
mutex_lock l(writer_mu_);
TF_RETURN_WITH_CONTEXT_IF_ERROR(record_writer_->Flush(), "Failed to flush ",
num_outstanding, " debug events to ",
file_path_);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(writable_file_->Sync(), "Failed to sync ",
num_outstanding, " debug events to ",
file_path_);
num_outstanding_events_.store(0);
return absl::OkStatus();
}
Status SingleDebugEventFileWriter::Close() {
Status status = Flush();
if (writable_file_ != nullptr) {
Status close_status = writable_file_->Close();
if (!close_status.ok()) {
status = close_status;
}
record_writer_.reset(nullptr);
writable_file_.reset(nullptr);
}
num_outstanding_events_ = 0;
return status;
}
const string SingleDebugEventFileWriter::FileName() { return file_path_; }
mutex DebugEventsWriter::factory_mu_(LINKER_INITIALIZED);
DebugEventsWriter::~DebugEventsWriter() { Close().IgnoreError(); }
DebugEventsWriter* DebugEventsWriter::GetDebugEventsWriter(
const string& dump_root, const string& tfdbg_run_id,
int64_t circular_buffer_size) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
std::unique_ptr<DebugEventsWriter> writer(
new DebugEventsWriter(dump_root, tfdbg_run_id, circular_buffer_size));
writer_pool->insert(std::make_pair(dump_root, std::move(writer)));
}
return (*writer_pool)[dump_root].get();
}
Status DebugEventsWriter::LookUpDebugEventsWriter(
const string& dump_root, DebugEventsWriter** debug_events_writer) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
return errors::FailedPrecondition(
"No DebugEventsWriter has been created at dump root ", dump_root);
}
*debug_events_writer = (*writer_pool)[dump_root].get();
return absl::OkStatus();
}
Status DebugEventsWriter::Init() {
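  // One-time setup guarded by initialization_mu_: creates the dump root if
  // needed, derives the shared file prefix from the timestamp and hostname,
  // opens the per-type event files, and writes the DebugMetadata record.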
mutex_lock l(initialization_mu_);
if (is_initialized_) {
return absl::OkStatus();
}
if (!env_->IsDirectory(dump_root_).ok()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(env_->RecursivelyCreateDir(dump_root_),
"Failed to create directory ", dump_root_);
}
int64_t time_in_seconds = env_->NowMicros() / 1e6;
file_prefix_ = io::JoinPath(
dump_root_, strings::Printf("%s.%010lld.%s", kFileNamePrefix,
static_cast<long long>(time_in_seconds),
port::Hostname().c_str()));
TF_RETURN_IF_ERROR(InitNonMetadataFile(SOURCE_FILES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(STACK_FRAMES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPHS));
metadata_writer_.reset();
string metadata_filename = GetFileNameInternal(METADATA);
metadata_writer_ =
std::make_unique<SingleDebugEventFileWriter>(metadata_filename);
if (metadata_writer_ == nullptr) {
return errors::Unknown("Could not create debug event metadata file writer");
}
DebugEvent debug_event;
DebugMetadata* metadata = debug_event.mutable_debug_metadata();
metadata->set_tensorflow_version(TF_VERSION_STRING);
metadata->set_file_version(
strings::Printf("%s%d", kVersionPrefix, kCurrentFormatVersion));
metadata->set_tfdbg_run_id(tfdbg_run_id_);
TF_RETURN_IF_ERROR(SerializeAndWriteDebugEvent(&debug_event, METADATA));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
metadata_writer_->Flush(), "Failed to flush debug event metadata writer");
TF_RETURN_IF_ERROR(InitNonMetadataFile(EXECUTION));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPH_EXECUTION_TRACES));
is_initialized_ = true;
return absl::OkStatus();
}
Status DebugEventsWriter::WriteSourceFile(SourceFile* source_file) {
DebugEvent debug_event;
debug_event.set_allocated_source_file(source_file);
return SerializeAndWriteDebugEvent(&debug_event, SOURCE_FILES);
}
Status DebugEventsWriter::WriteStackFrameWithId(
StackFrameWithId* stack_frame_with_id) {
DebugEvent debug_event;
debug_event.set_allocated_stack_frame_with_id(stack_frame_with_id);
return SerializeAndWriteDebugEvent(&debug_event, STACK_FRAMES);
}
Status DebugEventsWriter::WriteGraphOpCreation(
GraphOpCreation* graph_op_creation) {
DebugEvent debug_event;
debug_event.set_allocated_graph_op_creation(graph_op_creation);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
Status DebugEventsWriter::WriteDebuggedGraph(DebuggedGraph* debugged_graph) {
DebugEvent debug_event;
debug_event.set_allocated_debugged_graph(debugged_graph);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
Status DebugEventsWriter::WriteExecution(Execution* execution) {
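  // With the circular buffer disabled (size <= 0) the event is written to disk
  // immediately; otherwise it is serialized into an in-memory deque that keeps
  // only the most recent circular_buffer_size_ events until
  // FlushExecutionFiles() drains them.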
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_execution(execution);
return SerializeAndWriteDebugEvent(&debug_event, EXECUTION);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_execution(execution);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(execution_buffer_mu_);
execution_buffer_.emplace_back(std::move(serialized));
if (execution_buffer_.size() > circular_buffer_size_) {
execution_buffer_.pop_front();
}
return absl::OkStatus();
}
}
Status DebugEventsWriter::WriteGraphExecutionTrace(
GraphExecutionTrace* graph_execution_trace) {
TF_RETURN_IF_ERROR(Init());
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
return SerializeAndWriteDebugEvent(&debug_event, GRAPH_EXECUTION_TRACES);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(graph_execution_trace_buffer_mu_);
graph_execution_trace_buffer_.emplace_back(std::move(serialized));
if (graph_execution_trace_buffer_.size() > circular_buffer_size_) {
graph_execution_trace_buffer_.pop_front();
}
return absl::OkStatus();
}
}
Status DebugEventsWriter::WriteGraphExecutionTrace(
const string& tfdbg_context_id, const string& device_name,
const string& op_name, int32_t output_slot, int32_t tensor_debug_mode,
const Tensor& tensor_value) {
std::unique_ptr<GraphExecutionTrace> trace(new GraphExecutionTrace());
trace->set_tfdbg_context_id(tfdbg_context_id);
if (!op_name.empty()) {
trace->set_op_name(op_name);
}
if (output_slot > 0) {
trace->set_output_slot(output_slot);
}
if (tensor_debug_mode > 0) {
trace->set_tensor_debug_mode(TensorDebugMode(tensor_debug_mode));
}
trace->set_device_name(device_name);
tensor_value.AsProtoTensorContent(trace->mutable_tensor_proto());
return WriteGraphExecutionTrace(trace.release());
}
void DebugEventsWriter::WriteSerializedNonExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
(*writer)->WriteSerializedDebugEvent(debug_event_str);
}
void DebugEventsWriter::WriteSerializedExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
const std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
std::deque<string>* buffer = nullptr;
mutex* mu = nullptr;
switch (type) {
case EXECUTION:
writer = &execution_writer_;
buffer = &execution_buffer_;
mu = &execution_buffer_mu_;
break;
case GRAPH_EXECUTION_TRACES:
writer = &graph_execution_traces_writer_;
buffer = &graph_execution_trace_buffer_;
mu = &graph_execution_trace_buffer_mu_;
break;
default:
return;
}
if (circular_buffer_size_ <= 0) {
(*writer)->WriteSerializedDebugEvent(debug_event_str);
} else {
mutex_lock l(*mu);
buffer->push_back(debug_event_str);
if (buffer->size() > circular_buffer_size_) {
buffer->pop_front();
}
}
}
int DebugEventsWriter::RegisterDeviceAndGetId(const string& device_name) {
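  // Assigns a stable 1-based id to each unique device name; the first time a
  // name is seen, a DebuggedDevice event is also written to the graphs file.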
mutex_lock l(device_mu_);
int& device_id = device_name_to_id_[device_name];
if (device_id == 0) {
device_id = device_name_to_id_.size();
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
DebuggedDevice* debugged_device = debug_event.mutable_debugged_device();
debugged_device->set_device_name(device_name);
debugged_device->set_device_id(device_id);
string serialized;
debug_event.SerializeToString(&serialized);
graphs_writer_->WriteSerializedDebugEvent(serialized);
}
return device_id;
}
Status DebugEventsWriter::FlushNonExecutionFiles() {
TF_RETURN_IF_ERROR(Init());
if (source_files_writer_ != nullptr) {
TF_RETURN_IF_ERROR(source_files_writer_->Flush());
}
if (stack_frames_writer_ != nullptr) {
TF_RETURN_IF_ERROR(stack_frames_writer_->Flush());
}
if (graphs_writer_ != nullptr) {
TF_RETURN_IF_ERROR(graphs_writer_->Flush());
}
return absl::OkStatus();
}
Status DebugEventsWriter::FlushExecutionFiles() {
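  // Drains any events held in the execution and graph-execution-trace circular
  // buffers to their files, then flushes the underlying writers.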
TF_RETURN_IF_ERROR(Init());
if (execution_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(execution_buffer_mu_);
while (!execution_buffer_.empty()) {
execution_writer_->WriteSerializedDebugEvent(execution_buffer_.front());
execution_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(execution_writer_->Flush());
}
if (graph_execution_traces_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(graph_execution_trace_buffer_mu_);
while (!graph_execution_trace_buffer_.empty()) {
graph_execution_traces_writer_->WriteSerializedDebugEvent(
graph_execution_trace_buffer_.front());
graph_execution_trace_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(graph_execution_traces_writer_->Flush());
}
return absl::OkStatus();
}
string DebugEventsWriter::FileName(DebugEventFileType type) {
if (file_prefix_.empty()) {
Init().IgnoreError();
}
return GetFileNameInternal(type);
}
Status DebugEventsWriter::Close() {
{
mutex_lock l(initialization_mu_);
if (!is_initialized_) {
return absl::OkStatus();
}
}
std::vector<string> failed_to_close_files;
if (metadata_writer_ != nullptr) {
if (!metadata_writer_->Close().ok()) {
failed_to_close_files.push_back(metadata_writer_->FileName());
}
metadata_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushNonExecutionFiles());
if (source_files_writer_ != nullptr) {
if (!source_files_writer_->Close().ok()) {
failed_to_close_files.push_back(source_files_writer_->FileName());
}
source_files_writer_.reset(nullptr);
}
if (stack_frames_writer_ != nullptr) {
if (!stack_frames_writer_->Close().ok()) {
failed_to_close_files.push_back(stack_frames_writer_->FileName());
}
stack_frames_writer_.reset(nullptr);
}
if (graphs_writer_ != nullptr) {
if (!graphs_writer_->Close().ok()) {
failed_to_close_files.push_back(graphs_writer_->FileName());
}
graphs_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushExecutionFiles());
if (execution_writer_ != nullptr) {
if (!execution_writer_->Close().ok()) {
failed_to_close_files.push_back(execution_writer_->FileName());
}
execution_writer_.reset(nullptr);
}
if (graph_execution_traces_writer_ != nullptr) {
if (!graph_execution_traces_writer_->Close().ok()) {
failed_to_close_files.push_back(
graph_execution_traces_writer_->FileName());
}
graph_execution_traces_writer_.reset(nullptr);
}
if (failed_to_close_files.empty()) {
return absl::OkStatus();
} else {
    return errors::FailedPrecondition(
        "Failed to close ", failed_to_close_files.size(),
        " debug-events files associated with tfdbg");
}
}
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
DebugEventsWriter::GetDebugEventsWriterMap() {
static std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
writer_pool =
new std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>();
return writer_pool;
}
DebugEventsWriter::DebugEventsWriter(const string& dump_root,
const string& tfdbg_run_id,
int64_t circular_buffer_size)
: env_(Env::Default()),
dump_root_(dump_root),
tfdbg_run_id_(tfdbg_run_id),
is_initialized_(false),
initialization_mu_(),
circular_buffer_size_(circular_buffer_size),
execution_buffer_(),
execution_buffer_mu_(),
graph_execution_trace_buffer_(),
graph_execution_trace_buffer_mu_(),
device_name_to_id_(),
device_mu_() {}
Status DebugEventsWriter::InitNonMetadataFile(DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
const string filename = GetFileNameInternal(type);
writer->reset();
*writer = std::make_unique<SingleDebugEventFileWriter>(filename);
if (*writer == nullptr) {
return errors::Unknown("Could not create debug event file writer for ",
filename);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
(*writer)->Init(), "Initializing debug event writer at path ", filename);
VLOG(1) << "Successfully opened debug event file: " << filename;
return absl::OkStatus();
}
Status DebugEventsWriter::SerializeAndWriteDebugEvent(DebugEvent* debug_event,
DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
if (writer != nullptr) {
MaybeSetDebugEventTimestamp(debug_event, env_);
string str;
debug_event->AppendToString(&str);
(*writer)->WriteSerializedDebugEvent(str);
return absl::OkStatus();
} else {
return errors::Internal(
"Unable to find debug events file writer for DebugEventsFileType ",
type);
}
}
void DebugEventsWriter::SelectWriter(
DebugEventFileType type,
std::unique_ptr<SingleDebugEventFileWriter>** writer) {
switch (type) {
case METADATA:
*writer = &metadata_writer_;
break;
case SOURCE_FILES:
*writer = &source_files_writer_;
break;
case STACK_FRAMES:
*writer = &stack_frames_writer_;
break;
case GRAPHS:
*writer = &graphs_writer_;
break;
case EXECUTION:
*writer = &execution_writer_;
break;
case GRAPH_EXECUTION_TRACES:
*writer = &graph_execution_traces_writer_;
break;
}
}
const string DebugEventsWriter::GetSuffix(DebugEventFileType type) {
switch (type) {
case METADATA:
return kMetadataSuffix;
case SOURCE_FILES:
return kSourceFilesSuffix;
case STACK_FRAMES:
return kStackFramesSuffix;
case GRAPHS:
return kGraphsSuffix;
case EXECUTION:
return kExecutionSuffix;
case GRAPH_EXECUTION_TRACES:
return kGraphExecutionTracesSuffix;
default:
string suffix;
return suffix;
}
}
string DebugEventsWriter::GetFileNameInternal(DebugEventFileType type) {
const string suffix = GetSuffix(type);
return strings::StrCat(file_prefix_, ".", suffix);
}
}
} | #include "tensorflow/core/util/debug_events_writer.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfdbg {
Env* env() { return Env::Default(); }
class DebugEventsWriterTest : public ::testing::Test {
public:
static string GetDebugEventFileName(DebugEventsWriter* writer,
DebugEventFileType type) {
return writer->FileName(type);
}
static void ReadDebugEventProtos(DebugEventsWriter* writer,
DebugEventFileType type,
std::vector<DebugEvent>* protos) {
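    // Reads every DebugEvent record from the file of the given type into
    // `protos`.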
protos->clear();
const string filename = writer->FileName(type);
std::unique_ptr<RandomAccessFile> debug_events_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &debug_events_file));
io::RecordReader* reader = new io::RecordReader(debug_events_file.get());
uint64 offset = 0;
DebugEvent actual;
while (ReadDebugEventProto(reader, &offset, &actual)) {
protos->push_back(actual);
}
delete reader;
}
static bool ReadDebugEventProto(io::RecordReader* reader, uint64* offset,
DebugEvent* proto) {
tstring record;
Status s = reader->ReadRecord(offset, &record);
if (!s.ok()) {
return false;
}
return ParseProtoUnlimited(proto, record);
}
void SetUp() override {
dump_root_ = io::JoinPath(
testing::TmpDir(),
strings::Printf("%010lld", static_cast<long long>(env()->NowMicros())));
tfdbg_run_id_ = "test_tfdbg_run_id";
}
void TearDown() override {
if (env()->IsDirectory(dump_root_).ok()) {
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
TF_ASSERT_OK(env()->DeleteRecursively(dump_root_, &undeleted_files,
&undeleted_dirs));
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
string dump_root_;
string tfdbg_run_id_;
};
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterSameRootGivesSameObject) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_EQ(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterSameDumpRoot) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &writers, &mu]() {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 4; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 4);
EXPECT_EQ(writers[0], writers[1]);
EXPECT_EQ(writers[1], writers[2]);
EXPECT_EQ(writers[2], writers[3]);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterDiffDumpRoots) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 3);
std::atomic_int_fast64_t counter(0);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &counter, &writers, &mu]() {
const string new_dump_root =
io::JoinPath(dump_root_, strings::Printf("%ld", counter.fetch_add(1)));
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
new_dump_root, tfdbg_run_id_,
DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 3);
EXPECT_NE(writers[0], writers[1]);
EXPECT_NE(writers[0], writers[2]);
EXPECT_NE(writers[1], writers[2]);
}
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterDifferentRoots) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
const string dump_root_2 = io::JoinPath(dump_root_, "subdirectory");
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_2, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_NE(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, GetAndInitDebugEventsWriter) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, CallingCloseWithoutInitIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, CallingCloseTwiceIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, ConcurrentInitCalls) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
auto fn = [&writer]() { TF_ASSERT_OK(writer->Init()); };
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, InitTwiceDoesNotCreateNewMetadataFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
string metadata_path_1 =
GetDebugEventFileName(writer, DebugEventFileType::METADATA);
TF_ASSERT_OK(writer->Init());
EXPECT_EQ(GetDebugEventFileName(writer, DebugEventFileType::METADATA),
metadata_path_1);
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteSourceFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
SourceFile* source_file_1 = new SourceFile();
source_file_1->set_file_path("/home/tf_programs/main.py");
source_file_1->set_host_name("localhost.localdomain");
source_file_1->add_lines("import tensorflow as tf");
source_file_1->add_lines("");
source_file_1->add_lines("print(tf.constant([42.0]))");
source_file_1->add_lines("");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_1));
SourceFile* source_file_2 = new SourceFile();
source_file_2->set_file_path("/home/tf_programs/train.py");
source_file_2->set_host_name("localhost.localdomain");
source_file_2->add_lines("import tensorflow.keras as keras");
source_file_2->add_lines("");
source_file_2->add_lines("model = keras.Sequential()");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
SourceFile actual_source_file_1 = actuals[0].source_file();
EXPECT_EQ(actual_source_file_1.file_path(), "/home/tf_programs/main.py");
EXPECT_EQ(actual_source_file_1.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_1.lines().size(), 4);
EXPECT_EQ(actual_source_file_1.lines()[0], "import tensorflow as tf");
EXPECT_EQ(actual_source_file_1.lines()[1], "");
EXPECT_EQ(actual_source_file_1.lines()[2], "print(tf.constant([42.0]))");
EXPECT_EQ(actual_source_file_1.lines()[3], "");
SourceFile actual_source_file_2 = actuals[1].source_file();
EXPECT_EQ(actual_source_file_2.file_path(), "/home/tf_programs/train.py");
EXPECT_EQ(actual_source_file_2.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_2.lines().size(), 3);
EXPECT_EQ(actual_source_file_2.lines()[0],
"import tensorflow.keras as keras");
EXPECT_EQ(actual_source_file_2.lines()[1], "");
EXPECT_EQ(actual_source_file_2.lines()[2], "model = keras.Sequential()");
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteStackFramesFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
StackFrameWithId* stack_frame_1 = new StackFrameWithId();
stack_frame_1->set_id("deadbeaf");
GraphDebugInfo::FileLineCol* file_line_col =
stack_frame_1->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(20);
file_line_col->set_col(2);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = y + z");
StackFrameWithId* stack_frame_2 = new StackFrameWithId();
stack_frame_2->set_id("eeeeeeec");
file_line_col = stack_frame_2->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(21);
file_line_col->set_col(4);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = x ** 2.0");
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_1));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
StackFrameWithId actual_stack_frame_1 = actuals[0].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_1.id(), "deadbeaf");
GraphDebugInfo::FileLineCol file_line_col_1 =
actual_stack_frame_1.file_line_col();
EXPECT_EQ(file_line_col_1.file_index(), 12);
EXPECT_EQ(file_line_col_1.line(), 20);
EXPECT_EQ(file_line_col_1.col(), 2);
EXPECT_EQ(file_line_col_1.func(), "my_func");
EXPECT_EQ(file_line_col_1.code(), " x = y + z");
StackFrameWithId actual_stack_frame_2 = actuals[1].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_2.id(), "eeeeeeec");
GraphDebugInfo::FileLineCol file_line_col_2 =
actual_stack_frame_2.file_line_col();
EXPECT_EQ(file_line_col_2.file_index(), 12);
EXPECT_EQ(file_line_col_2.line(), 21);
EXPECT_EQ(file_line_col_2.col(), 4);
EXPECT_EQ(file_line_col_2.func(), "my_func");
EXPECT_EQ(file_line_col_2.code(), " x = x ** 2.0");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphOpCreationAndDebuggedGraph) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
GraphOpCreation* graph_op_creation = new GraphOpCreation();
graph_op_creation->set_op_type("MatMul");
graph_op_creation->set_op_name("Dense_1/MatMul");
TF_ASSERT_OK(writer->WriteGraphOpCreation(graph_op_creation));
DebuggedGraph* debugged_graph = new DebuggedGraph();
debugged_graph->set_graph_id("deadbeaf");
debugged_graph->set_graph_name("my_func_graph");
TF_ASSERT_OK(writer->WriteDebuggedGraph(debugged_graph));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
GraphOpCreation actual_op_creation = actuals[0].graph_op_creation();
EXPECT_EQ(actual_op_creation.op_type(), "MatMul");
EXPECT_EQ(actual_op_creation.op_name(), "Dense_1/MatMul");
DebuggedGraph actual_debugged_graph = actuals[1].debugged_graph();
EXPECT_EQ(actual_debugged_graph.graph_id(), "deadbeaf");
EXPECT_EQ(actual_debugged_graph.graph_name(), "my_func_graph");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteAndFlushCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheDifferentFiles) {
const int32_t kConcurrentWrites = 30;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 10);
std::atomic_int_fast32_t counter(0);
auto fn = [&writer, &counter]() {
const int32_t index = counter.fetch_add(1);
if (index % 3 == 0) {
SourceFile* source_file = new SourceFile();
source_file->set_file_path(
strings::Printf("/home/tf_programs/program_%.2d.py", index));
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
} else if (index % 3 == 1) {
StackFrameWithId* stack_frame = new StackFrameWithId();
stack_frame->set_id(strings::Printf("e%.2d", index));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame));
} else {
GraphOpCreation* op_creation = new GraphOpCreation();
op_creation->set_op_type("Log");
op_creation->set_op_name(strings::Printf("Log_%.2d", index));
TF_ASSERT_OK(writer->WriteGraphOpCreation(op_creation));
}
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> file_paths;
std::vector<string> host_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.2d.py", i * 3));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> stack_frame_ids;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
stack_frame_ids.push_back(actuals[i].stack_frame_with_id().id());
}
std::sort(stack_frame_ids.begin(), stack_frame_ids.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(stack_frame_ids[i], strings::Printf("e%.2d", i * 3 + 1));
}
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> op_types;
std::vector<string> op_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
op_types.push_back(actuals[i].graph_op_creation().op_type());
op_names.push_back(actuals[i].graph_op_creation().op_name());
}
std::sort(op_names.begin(), op_names.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(op_types[i], "Log");
EXPECT_EQ(op_names[i], strings::Printf("Log_%.2d", i * 3 + 2));
}
}
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferNoFlush) {
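  // Without an explicit FlushExecutionFiles() call, events stay in the
  // circular buffer and the execution file on disk remains empty.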
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].execution().op_type(), "Log");
EXPECT_EQ(actuals[i].execution().input_tensor_ids().size(), 1);
EXPECT_EQ(actuals[i].execution().input_tensor_ids()[0],
kCyclicBufferSize + i);
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
Execution* execution = new Execution();
execution->set_op_type("Abs");
execution->add_input_tensor_ids(counter.fetch_add(1));
TF_ASSERT_OK(writer->WriteExecution(execution));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].execution().op_type(), "Abs");
EXPECT_EQ(actuals[index].execution().input_tensor_ids().size(), 1);
EXPECT_GE(actuals[index].execution().input_tensor_ids()[0], 0);
EXPECT_LE(actuals[index].execution().input_tensor_ids()[0],
kCyclicBufferSize * 2);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferNoFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithoutPreviousInitCall) {
const size_t kCyclicBufferSize = -1;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_0"));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_EQ(actuals[0].graph_execution_trace().tfdbg_context_id(), "graph_0");
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].graph_execution_trace().tfdbg_context_id(),
strings::Printf("graph_%.2ld", i + kCyclicBufferSize));
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(
strings::Printf("new_graph_%.2ld", counter.fetch_add(1)));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].graph_execution_trace().tfdbg_context_id().find(
"new_graph_"),
0);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, RegisterDeviceAndGetIdTrace) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
int device_ids[8];
for (int i = 0; i < 8; ++i) {
thread_pool->Schedule([i, &writer, &device_ids]() {
const string device_name = strings::Printf(
"/job:localhost/replica:0/task:0/device:GPU:%d", i % 4);
device_ids[i] = writer->RegisterDeviceAndGetId(device_name);
});
}
delete thread_pool;
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
EXPECT_EQ(device_ids[0], device_ids[4]);
EXPECT_EQ(device_ids[1], device_ids[5]);
EXPECT_EQ(device_ids[2], device_ids[6]);
EXPECT_EQ(device_ids[3], device_ids[7]);
EXPECT_EQ(absl::flat_hash_set<int>(device_ids, device_ids + 8).size(), 4);
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 4);
for (const DebugEvent& actual : actuals) {
const string& device_name = actual.debugged_device().device_name();
int device_index = -1;
CHECK(absl::SimpleAtoi(device_name.substr(strlen(
"/job:localhost/replica:0/task:0/device:GPU:")),
&device_index));
EXPECT_EQ(actual.debugged_device().device_id(), device_ids[device_index]);
}
}
TEST_F(DebugEventsWriterTest, DisableCyclicBufferBehavior) {
const size_t kCyclicBufferSize = 0;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
const size_t kNumEvents = 20;
for (size_t i = 0; i < kNumEvents; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kNumEvents);
for (size_t i = 0; i < kNumEvents; ++i) {
EXPECT_EQ(actuals[i].execution().op_type(), "Log");
EXPECT_EQ(actuals[i].execution().input_tensor_ids().size(), 1);
EXPECT_EQ(actuals[i].execution().input_tensor_ids()[0], i);
}
for (size_t i = 0; i < kNumEvents; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kNumEvents);
for (size_t i = 0; i < kNumEvents; ++i) {
EXPECT_EQ(actuals[i].graph_execution_trace().tfdbg_context_id(),
strings::Printf("graph_%.2ld", i));
}
TF_ASSERT_OK(writer->Close());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_events_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_events_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
330e0a6b-f0a5-44be-8255-1dab71a09c94 | cpp | tensorflow/tensorflow | events_writer | tensorflow/core/util/events_writer.cc | tensorflow/core/util/events_writer_test.cc | #include "tensorflow/core/util/events_writer.h"
#include <stddef.h>
#include <memory>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
EventsWriter::EventsWriter(const string& file_prefix)
: env_(Env::Default()),
file_prefix_(file_prefix),
num_outstanding_events_(0) {}
EventsWriter::~EventsWriter() {
Close().IgnoreError();
}
Status EventsWriter::Init() { return InitWithSuffix(""); }
Status EventsWriter::InitWithSuffix(const string& suffix) {
file_suffix_ = suffix;
return InitIfNeeded();
}
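// Creates the underlying events file on first use (or after the previous file
// disappears). The file is named
// "<file_prefix>.out.tfevents.<seconds-since-epoch>.<hostname><suffix>" and
// begins with a header Event carrying the file version string and writer
// source metadata, which is flushed immediately.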
Status EventsWriter::InitIfNeeded() {
if (recordio_writer_ != nullptr) {
CHECK(!filename_.empty());
if (!FileStillExists().ok()) {
if (num_outstanding_events_ > 0) {
LOG(WARNING) << "Re-initialization, attempting to open a new file, "
<< num_outstanding_events_ << " events will be lost.";
}
} else {
return absl::OkStatus();
}
}
int64_t time_in_seconds = env_->NowMicros() / 1000000;
filename_ =
strings::Printf("%s.out.tfevents.%010lld.%s%s", file_prefix_.c_str(),
static_cast<long long>(time_in_seconds),
port::Hostname().c_str(), file_suffix_.c_str());
recordio_writer_.reset();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env_->NewWritableFile(filename_, &recordio_file_),
"Creating writable file ", filename_);
recordio_writer_ = std::make_unique<io::RecordWriter>(recordio_file_.get());
if (recordio_writer_ == nullptr) {
return errors::Unknown("Could not create record writer");
}
num_outstanding_events_ = 0;
VLOG(1) << "Successfully opened events file: " << filename_;
{
Event event;
event.set_wall_time(time_in_seconds);
event.set_file_version(strings::StrCat(kVersionPrefix, kCurrentVersion));
SourceMetadata* source_metadata = event.mutable_source_metadata();
source_metadata->set_writer(kWriterSourceMetadata);
WriteEvent(event);
TF_RETURN_WITH_CONTEXT_IF_ERROR(Flush(), "Flushing first event.");
}
return absl::OkStatus();
}
string EventsWriter::FileName() {
if (filename_.empty()) {
InitIfNeeded().IgnoreError();
}
return filename_;
}
void EventsWriter::WriteSerializedEvent(StringPiece event_str) {
if (recordio_writer_ == nullptr) {
if (!InitIfNeeded().ok()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
}
}
num_outstanding_events_++;
recordio_writer_->WriteRecord(event_str).IgnoreError();
}
void EventsWriter::WriteEvent(const Event& event) {
string record;
event.AppendToString(&record);
WriteSerializedEvent(record);
}
Status EventsWriter::Flush() {
if (num_outstanding_events_ == 0) return absl::OkStatus();
CHECK(recordio_file_ != nullptr) << "Unexpected NULL file";
TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_writer_->Flush(), "Failed to flush ",
num_outstanding_events_, " events to ",
filename_);
TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_file_->Sync(), "Failed to sync ",
num_outstanding_events_, " events to ",
filename_);
VLOG(1) << "Wrote " << num_outstanding_events_ << " events to disk.";
num_outstanding_events_ = 0;
return absl::OkStatus();
}
Status EventsWriter::Close() {
Status status = Flush();
if (recordio_file_ != nullptr) {
Status close_status = recordio_file_->Close();
if (!close_status.ok()) {
status = close_status;
}
recordio_writer_.reset(nullptr);
recordio_file_.reset(nullptr);
}
num_outstanding_events_ = 0;
return status;
}
Status EventsWriter::FileStillExists() {
if (env_->FileExists(filename_).ok()) {
return absl::OkStatus();
}
return errors::Unknown("The events file ", filename_, " has disappeared.");
}
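// A minimal usage sketch (illustrative only; the prefix and event are
// examples, and error handling is elided):
//   EventsWriter writer("/tmp/my_run");
//   TF_CHECK_OK(writer.Init());
//   writer.WriteEvent(some_event);
//   TF_CHECK_OK(writer.Flush());
//   TF_CHECK_OK(writer.Close());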
} | #include "tensorflow/core/util/events_writer.h"
#include <math.h>
#include <memory>
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
Env* env() { return Env::Default(); }
void WriteSimpleValue(EventsWriter* writer, double wall_time, int64_t step,
const string& tag, float simple_value) {
Event event;
event.set_wall_time(wall_time);
event.set_step(step);
Summary::Value* summ_val = event.mutable_summary()->add_value();
summ_val->set_tag(tag);
summ_val->set_simple_value(simple_value);
writer->WriteEvent(event);
}
void WriteFile(EventsWriter* writer) {
WriteSimpleValue(writer, 1234, 34, "foo", 3.14159);
WriteSimpleValue(writer, 2345, 35, "bar", -42);
}
static bool ReadEventProto(io::RecordReader* reader, uint64* offset,
Event* proto) {
tstring record;
Status s = reader->ReadRecord(offset, &record);
if (!s.ok()) {
return false;
}
return ParseProtoUnlimited(proto, record);
}
void VerifyFile(const string& filename) {
TF_CHECK_OK(env()->FileExists(filename));
std::unique_ptr<RandomAccessFile> event_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &event_file));
io::RecordReader* reader = new io::RecordReader(event_file.get());
uint64 offset = 0;
Event actual;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
double current_time = env()->NowMicros() / 1000000.0;
EXPECT_LT(fabs(actual.wall_time() - current_time), 5);
EXPECT_EQ(actual.file_version(),
strings::StrCat(EventsWriter::kVersionPrefix,
EventsWriter::kCurrentVersion));
EXPECT_EQ(actual.source_metadata().writer(),
EventsWriter::kWriterSourceMetadata);
Event expected;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 1234 step: 34 "
"summary { value { tag: 'foo' simple_value: 3.14159 } }",
&expected));
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 2345 step: 35 "
"summary { value { tag: 'bar' simple_value: -42 } }",
&expected));
TF_CHECK_OK(env()->DeleteFile(filename));
delete reader;
}
string GetDirName(const string& suffix) {
return io::JoinPath(testing::TmpDir(), suffix);
}
TEST(EventWriter, WriteFlush) {
string file_prefix = GetDirName("/writeflush_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, WriteClose) {
string file_prefix = GetDirName("/writeclose_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, WriteDelete) {
string file_prefix = GetDirName("/writedelete_test");
EventsWriter* writer = new EventsWriter(file_prefix);
WriteFile(writer);
string filename = writer->FileName();
delete writer;
VerifyFile(filename);
}
TEST(EventWriter, FailFlush) {
string file_prefix = GetDirName("/failflush_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Flush().ok());
}
TEST(EventWriter, FailClose) {
string file_prefix = GetDirName("/failclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Close().ok());
}
TEST(EventWriter, InitWriteClose) {
string file_prefix = GetDirName("/initwriteclose_test");
EventsWriter writer(file_prefix);
TF_EXPECT_OK(writer.Init());
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename1 = writer.FileName();
EXPECT_EQ(filename0, filename1);
VerifyFile(filename1);
}
TEST(EventWriter, NameWriteClose) {
string file_prefix = GetDirName("/namewriteclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
VerifyFile(filename);
}
TEST(EventWriter, NameClose) {
string file_prefix = GetDirName("/nameclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(writer.Close());
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
}
TEST(EventWriter, FileDeletionBeforeWriting) {
string file_prefix = GetDirName("/fdbw_test");
EventsWriter writer(file_prefix);
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
env()->SleepForMicroseconds(
2000000);
TF_ASSERT_OK(env()->DeleteFile(filename0));
TF_EXPECT_OK(writer.Init());
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename1 = writer.FileName();
EXPECT_NE(filename0, filename1);
VerifyFile(filename1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/events_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/events_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dc80c6e-c24a-4fbb-9eae-1afc9c6733d1 | cpp | tensorflow/tensorflow | batch_util | tensorflow/core/util/batch_util.cc | tensorflow/core/framework/batch_util_test.cc | #include "tensorflow/core/util/batch_util.h"
#include <algorithm>
#include <utility>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#define TF_CALL_DATASET_TYPES(m) TF_CALL_ALL_TYPES(m) TF_CALL_QUANTIZED_TYPES(m)
namespace tensorflow {
namespace batch_util {
namespace {
Status ValidateInput(const Tensor& parent, const Tensor& element,
int64_t index) {
DCHECK_NE(parent.dim_size(0), 0);
DCHECK_GE(index, 0);
if (element.NumElements() != (parent.NumElements() / parent.dim_size(0))) {
TensorShape chip_shape = parent.shape();
chip_shape.RemoveDim(0);
return errors::Internal(
"ValidateInput Cannot perform copy: number of elements does not match. "
" Shapes are: [element]: ",
element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
template <typename T>
Status HandleElementToSlice(const Tensor& , T* src, T* dest,
int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<tstring>(const Tensor& element, tstring* src,
tstring* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Variant>(const Tensor& element, Variant* src,
Variant* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<ResourceHandle>(const Tensor& ,
ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Eigen::half>(const Tensor& ,
Eigen::half* src, Eigen::half* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
template <typename T>
void HandleSliceToElement(const T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(const tstring* src, tstring* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Variant>(const Variant* src, Variant* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<ResourceHandle>(const ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(const Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <typename T>
void HandleSliceToElement(Tensor* parent, T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(Tensor* parent, tstring* src, tstring* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<Variant>(Tensor* parent, Variant* src, Variant* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<ResourceHandle>(Tensor* parent, ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(Tensor* parent, Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
}
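// Copies `element` into slice `index` of `parent` along dimension 0.
// `element` is taken by value so that string/variant buffers can be moved
// instead of copied when it holds the only reference to them.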
Status CopyElementToSlice(Tensor element, Tensor* parent, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, element, index));
const int64_t num_values = element.NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = element.base<T>(); \
T* dest = parent->base<T>() + (num_values * index); \
return HandleElementToSlice<T>(element, src, dest, num_values); \
}
switch (element.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyElementToSlice Unhandled data type: ",
element.dtype());
}
}
Status CopySliceToElement(const Tensor& parent, Tensor* element,
int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src = parent.base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(src, dest, num_values); \
return OkStatus(); \
}
switch (parent.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopySliceToElement Unhandled data type: ",
element->dtype());
}
}
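// Copies `num_slices` contiguous dimension-0 slices from `src` (starting at
// `src_offset`) into `dst` (starting at `dst_offset`), moving string/variant
// buffers when `src` holds the only reference. Requires matching dtypes and
// per-slice element counts; offsets and counts are range-checked.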
Status MaybeMoveContiguousSlices(Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: src and dst have "
"different "
"dtypes. Source dtype: ",
src.dtype(), " dstination dtype: ", dst->dtype(), "."));
}
if (src.dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: src has to be a tensor "
"with "
"rank >= 1. Source shape: ",
src.shape().DebugString()));
}
if (dst->dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString()));
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: source and dst shapes "
"are"
"not compatible. Source shape: ",
src.shape().DebugString(),
", dst shape: ", dst->shape().DebugString()));
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, "."));
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(&src, src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices unhandled data type: ", src.dtype()));
}
}
Status CopyContiguousSlices(const Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src and dst have different "
"dtypes. Source dtype: ",
src.dtype(), " dstination dtype: ", dst->dtype(), ".");
}
if (src.dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src has to be a tensor with "
"rank >= 1. Source shape: ",
src.shape().DebugString());
}
if (dst->dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString());
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: source and dst shapes are"
"not compatible. Source shape: ",
src.shape().DebugString(), ", dst shape: ", dst->shape().DebugString());
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, ".");
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyContiguousSlices unhandled data type: ",
src.dtype());
}
}
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = parent->base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(parent, src, dest, num_values); \
return OkStatus(); \
}
switch (parent->dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"MaybeMoveSliceToElement Unhandled data type: ", element->dtype());
}
}
Status ValidateElementToLargerSlice(const Tensor& element, Tensor* parent) {
DCHECK_NE(parent->dim_size(0), 0);
if (element.NumElements() > (parent->NumElements() / parent->dim_size(0))) {
TensorShape chip_shape = parent->shape();
chip_shape.RemoveDim(0);
return errors::Internal(
"HandleElementToLargerSlice Cannot copy slice: number of entries in "
"element is greater than number of elements in parent slice. ",
"Shapes are: [element]: ", element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
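// Writes `element` into slice `index` of the (possibly larger) `parent`
// along dimension 0; only the region covered by `element`'s shape is
// written, so callers typically pre-fill `parent` (e.g. with SetElementZero)
// to provide padding.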
template <typename T, int NDIMS>
Status HandleElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
TF_RETURN_IF_ERROR(ValidateElementToLargerSlice(element, parent));
if (element.NumElements() == 0) {
return absl::OkStatus();
}
auto element_t = element.tensor<T, NDIMS>();
auto parent_t = parent->tensor<T, NDIMS + 1>();
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
slice_indices[0] = index;
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
slice_size[0] = 1;
for (size_t i = 1; i < slice_size.size(); ++i) {
slice_size[i] = element_t.dimension(i - 1);
}
parent_t.slice(slice_indices, slice_size) = element_t.reshape(slice_size);
return absl::OkStatus();
}
template <int NDIMS>
Status HandleElementToLargerSliceWithRank(const Tensor& element, Tensor* parent,
int index) {
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
}
switch (element.dtype()) {
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"HandleElementToLargerSliceWithRank Unhandled data type: ",
element.dtype());
}
}
Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
if (parent->dims() != element.dims() + 1) {
return errors::Internal(
"Mismatched ranks. Element's rank is: ", element.dims(),
" but element is meant to be a slice in output Tensor having rank: ",
parent->dims(), " (should be: ", element.dims() + 1, ")");
}
#define HANDLE_DIMS(NDIMS) \
case NDIMS: { \
TF_RETURN_IF_ERROR( \
HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index)); \
return OkStatus(); \
}
switch (element.dims()) {
HANDLE_DIMS(0);
HANDLE_DIMS(1);
HANDLE_DIMS(2);
HANDLE_DIMS(3);
HANDLE_DIMS(4);
HANDLE_DIMS(5);
#undef HANDLE_DIMS
default:
return errors::Unimplemented("CopyElementToLargerSlice Unhandled rank: ",
element.dims());
}
}
Status SetElementZero(Tensor* element, const Tensor& padding) {
#define HANDLE_TYPE(T) \
if (element->dtype() == DataTypeToEnum<T>::value) { \
element->flat<T>().setConstant(padding.scalar<T>()()); \
return OkStatus(); \
}
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
return errors::Unimplemented("SetElementZero Unhandled data type: ",
element->dtype());
}
}
} | #include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CopyContiguousSlicesTest, CompatibleShape) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 2, 0, 5, &dst);
ASSERT_EQ(error::OK, s.code());
}
TEST(CopyContiguousSlicesTest, SourceOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 7, 0, 5, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, DstOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 0, 0, 8, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, CheckDstWithExpectedValues) {
auto src = test::AsTensor<float>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
TensorShape({5, 2}));
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 1, 5, 3, &dst);
ASSERT_EQ(error::OK, s.code());
test::ExpectTensorEqual<float>(
test::AsTensor<float>({2, 3, 4, 5, 6, 7}, TensorShape({3, 2, 1})),
dst.Slice(5, 8));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/batch_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/batch_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d164151-1d9b-4732-8ebc-8497172ea862 | cpp | tensorflow/tensorflow | memmapped_file_system | tensorflow/core/util/memmapped_file_system.cc | tensorflow/core/util/memmapped_file_system_test.cc | #include "tensorflow/core/util/memmapped_file_system.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/memmapped_file_system.pb.h"
namespace tensorflow {
namespace {
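// Decodes a little-endian uint64 from `buffer` independent of host byte
// order; used for the directory-offset trailer at the end of a package file.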
uint64 DecodeUint64LittleEndian(const uint8* buffer) {
uint64 result = 0;
for (int i = 0; i < static_cast<int>(sizeof(uint64)); ++i) {
result |= static_cast<uint64>(buffer[i]) << (8 * i);
}
return result;
}
}
namespace {
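// Read-only file and memory-region adapters that alias a byte range inside
// the already-mapped package; no data is copied.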
class ReadOnlyMemoryRegionFromMemmapped : public ReadOnlyMemoryRegion {
public:
ReadOnlyMemoryRegionFromMemmapped(const void* data, uint64 length)
: data_(data), length_(length) {}
~ReadOnlyMemoryRegionFromMemmapped() override = default;
const void* data() override { return data_; }
uint64 length() override { return length_; }
private:
const void* const data_;
const uint64 length_;
};
class RandomAccessFileFromMemmapped : public RandomAccessFile {
public:
RandomAccessFileFromMemmapped(const void* data, uint64 length)
: data_(data), length_(length) {}
~RandomAccessFileFromMemmapped() override = default;
Status Name(StringPiece* result) const override {
return errors::Unimplemented(
"RandomAccessFileFromMemmapped does not support Name()");
}
Status Read(uint64 offset, size_t to_read, StringPiece* result,
char* scratch) const override {
if (offset >= length_) {
*result = StringPiece(scratch, 0);
return Status(absl::StatusCode::kOutOfRange, "Read after file end");
}
const uint64 region_left =
std::min(length_ - offset, static_cast<uint64>(to_read));
*result =
StringPiece(reinterpret_cast<const char*>(data_) + offset, region_left);
return (region_left == to_read) ? absl::OkStatus()
: Status(absl::StatusCode::kOutOfRange,
"Read less bytes than requested");
}
private:
const void* const data_;
const uint64 length_;
};
}
MemmappedFileSystem::MemmappedFileSystem() = default;
Status MemmappedFileSystem::FileExists(const string& fname,
TransactionToken* token) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(fname);
if (dir_element != directory_.end()) {
return absl::OkStatus();
}
return errors::NotFound(fname, " not found");
}
Status MemmappedFileSystem::NewRandomAccessFile(
const string& filename, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*result = std::make_unique<RandomAccessFileFromMemmapped>(
GetMemoryWithOffset(dir_element->second.offset),
dir_element->second.length);
return absl::OkStatus();
}
Status MemmappedFileSystem::NewReadOnlyMemoryRegionFromFile(
const string& filename, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*result = std::make_unique<ReadOnlyMemoryRegionFromMemmapped>(
GetMemoryWithOffset(dir_element->second.offset),
dir_element->second.length);
return absl::OkStatus();
}
Status MemmappedFileSystem::GetFileSize(const string& filename,
TransactionToken* token, uint64* size) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*size = dir_element->second.length;
return absl::OkStatus();
}
Status MemmappedFileSystem::Stat(const string& fname, TransactionToken* token,
FileStatistics* stat) {
uint64 size;
auto status = GetFileSize(fname, token, &size);
if (status.ok()) {
stat->length = size;
}
return status;
}
Status MemmappedFileSystem::NewWritableFile(const string& filename,
TransactionToken* token,
std::unique_ptr<WritableFile>* wf) {
return errors::Unimplemented("memmapped format doesn't support writing");
}
Status MemmappedFileSystem::NewAppendableFile(
const string& filename, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
return errors::Unimplemented("memmapped format doesn't support writing");
}
Status MemmappedFileSystem::GetChildren(const string& filename,
TransactionToken* token,
std::vector<string>* strings) {
return errors::Unimplemented("memmapped format doesn't support GetChildren");
}
Status MemmappedFileSystem::GetMatchingPaths(const string& pattern,
TransactionToken* token,
std::vector<string>* results) {
return errors::Unimplemented(
"memmapped format doesn't support GetMatchingPaths");
}
Status MemmappedFileSystem::DeleteFile(const string& filename,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support DeleteFile");
}
Status MemmappedFileSystem::CreateDir(const string& dirname,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support CreateDir");
}
Status MemmappedFileSystem::DeleteDir(const string& dirname,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support DeleteDir");
}
Status MemmappedFileSystem::RenameFile(const string& filename_from,
const string& filename_to,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support RenameFile");
}
const void* MemmappedFileSystem::GetMemoryWithOffset(uint64 offset) const {
return reinterpret_cast<const uint8*>(mapped_memory_->data()) + offset;
}
constexpr const char MemmappedFileSystem::kMemmappedPackagePrefix[];
constexpr const char MemmappedFileSystem::kMemmappedPackageDefaultGraphDef[];
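// Package layout: region bytes, then a serialized MemmappedFileSystemDirectory
// proto, then a trailing little-endian uint64 giving the directory's offset.
// This maps the file, reads that trailing offset, parses the directory, and
// checks that element offsets strictly decrease when iterated in reverse
// (i.e. regions are laid out in increasing-offset order) and that no region
// name is duplicated.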
Status MemmappedFileSystem::InitializeFromFile(Env* env,
const string& filename) {
TF_RETURN_IF_ERROR(
env->NewReadOnlyMemoryRegionFromFile(filename, &mapped_memory_));
directory_.clear();
if (mapped_memory_->length() <= sizeof(uint64)) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid package size");
}
const auto memory_start =
reinterpret_cast<const uint8*>(mapped_memory_->data());
const uint64 directory_offset = DecodeUint64LittleEndian(
memory_start + mapped_memory_->length() - sizeof(uint64));
if (directory_offset > mapped_memory_->length() - sizeof(uint64)) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid directory offset");
}
MemmappedFileSystemDirectory proto_directory;
if (!ParseProtoUnlimited(
&proto_directory, memory_start + directory_offset,
mapped_memory_->length() - directory_offset - sizeof(uint64))) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Can't parse its internal directory");
}
uint64 prev_element_offset = directory_offset;
for (auto element_iter = proto_directory.element().rbegin();
element_iter != proto_directory.element().rend(); ++element_iter) {
if (element_iter->offset() >= prev_element_offset) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid offset of internal component");
}
if (!directory_
.insert(std::make_pair(
element_iter->name(),
FileRegion(element_iter->offset(), element_iter->length())))
.second) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Duplicate name of internal component ",
element_iter->name());
}
prev_element_offset = element_iter->offset();
}
return absl::OkStatus();
}
bool MemmappedFileSystem::IsMemmappedPackageFilename(const string& filename) {
return absl::StartsWith(filename, kMemmappedPackagePrefix);
}
namespace {
bool IsValidRegionChar(char c) {
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') || c == '_' || c == '.';
}
}
bool MemmappedFileSystem::IsWellFormedMemmappedPackageFilename(
const string& filename) {
if (!IsMemmappedPackageFilename(filename)) {
return false;
}
for (char c :
filename.substr(strlen(kMemmappedPackagePrefix),
filename.length() - strlen(kMemmappedPackagePrefix))) {
if (!IsValidRegionChar(c)) {
return false;
}
}
return true;
}
MemmappedEnv::MemmappedEnv(Env* env) : EnvWrapper(env) {}
Status MemmappedEnv::GetFileSystemForFile(const string& fname,
FileSystem** result) {
if (MemmappedFileSystem::IsMemmappedPackageFilename(fname)) {
if (!memmapped_file_system_) {
return errors::FailedPrecondition(
"MemmappedEnv is not initialized from a file.");
}
*result = memmapped_file_system_.get();
return absl::OkStatus();
}
return EnvWrapper::GetFileSystemForFile(fname, result);
}
Status MemmappedEnv::GetRegisteredFileSystemSchemes(
std::vector<string>* schemes) {
const auto status = EnvWrapper::GetRegisteredFileSystemSchemes(schemes);
if (status.ok()) {
schemes->emplace_back(MemmappedFileSystem::kMemmappedPackagePrefix);
}
return status;
}
Status MemmappedEnv::InitializeFromFile(const string& package_filename) {
std::unique_ptr<MemmappedFileSystem> file_system_ptr(new MemmappedFileSystem);
const auto status =
file_system_ptr->InitializeFromFile(target(), package_filename);
if (status.ok()) {
memmapped_file_system_ = std::move(file_system_ptr);
}
return status;
}
} | #include "tensorflow/core/util/memmapped_file_system.h"
#include <memory>
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/memmapped_file_system_writer.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tensorflow {
namespace {
// NOTE: the exact region suffixes below are assumptions; any suffix made of
// [A-Za-z0-9_.] characters forms a well-formed memmapped-package region name.
constexpr char kTensor1FileName[] = "memmapped_package://t1";
constexpr char kTensor2FileName[] = "memmapped_package://t2";
constexpr char kProtoFileName[] = "memmapped_package://b";
constexpr int kTestGraphDefVersion = 666;
Status CreateMemmappedFileSystemFile(const string& filename, bool corrupted,
Tensor* test_tensor) {
Env* env = Env::Default();
MemmappedFileSystemWriter writer;
TF_RETURN_IF_ERROR(writer.InitializeToFile(env, filename));
test::FillFn<float>(test_tensor,
[](int i) { return static_cast<float>(i * i); });
TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor1FileName));
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(kTestGraphDefVersion);
graph_def.mutable_versions()->set_min_consumer(kTestGraphDefVersion);
TF_RETURN_IF_ERROR(writer.SaveProtobuf(graph_def, kProtoFileName));
test::FillFn<float>(test_tensor,
[](int i) { return static_cast<float>(i) * i * i; });
TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor2FileName));
if (!corrupted) {
TF_RETURN_IF_ERROR(writer.FlushAndClose());
}
return absl::OkStatus();
}
TEST(MemmappedFileSystemTest, SimpleTest) {
const TensorShape test_tensor_shape = {10, 200};
Tensor test_tensor(DT_FLOAT, test_tensor_shape);
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "memmapped_env_test");
TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, false, &test_tensor));
MemmappedEnv memmapped_env(Env::Default());
TF_ASSERT_OK(memmapped_env.InitializeFromFile(filename));
GraphDef test_graph_def;
TF_EXPECT_OK(
ReadBinaryProto(&memmapped_env, kProtoFileName, &test_graph_def));
EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().producer());
EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().min_consumer());
std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
TF_ASSERT_OK(memmapped_env.NewReadOnlyMemoryRegionFromFile(kTensor2FileName,
&memory_region));
ASSERT_GE(memory_region->length(), test_tensor.TotalBytes());
EXPECT_EQ(test_tensor.tensor_data(),
StringPiece(static_cast<const char*>(memory_region->data()),
test_tensor.TotalBytes()));
uint64 file_size = 0;
TF_ASSERT_OK(memmapped_env.GetFileSize(kTensor2FileName, &file_size));
EXPECT_EQ(test_tensor.TotalBytes(), file_size);
FileStatistics stat;
TF_ASSERT_OK(memmapped_env.Stat(kTensor2FileName, &stat));
EXPECT_EQ(test_tensor.TotalBytes(), stat.length);
EXPECT_EQ(
error::NOT_FOUND,
memmapped_env.NewReadOnlyMemoryRegionFromFile("bla-bla", &memory_region)
.code());
TF_EXPECT_OK(memmapped_env.FileExists(kTensor2FileName));
EXPECT_EQ(error::Code::NOT_FOUND,
memmapped_env.FileExists("bla-bla-bla").code());
}
TEST(MemmappedFileSystemTest, NotInitialized) {
MemmappedEnv memmapped_env(Env::Default());
std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
EXPECT_EQ(
error::FAILED_PRECONDITION,
memmapped_env
.NewReadOnlyMemoryRegionFromFile(kTensor1FileName, &memory_region)
.code());
std::unique_ptr<RandomAccessFile> file;
EXPECT_EQ(error::FAILED_PRECONDITION,
memmapped_env.NewRandomAccessFile(kProtoFileName, &file).code());
}
TEST(MemmappedFileSystemTest, Corrupted) {
const TensorShape test_tensor_shape = {100, 200};
Tensor test_tensor(DT_FLOAT, test_tensor_shape);
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "memmapped_env_corrupted_test");
TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, true, &test_tensor));
MemmappedFileSystem memmapped_env;
ASSERT_NE(memmapped_env.InitializeFromFile(Env::Default(), filename),
absl::OkStatus());
}
TEST(MemmappedFileSystemTest, ProxyToDefault) {
MemmappedEnv memmapped_env(Env::Default());
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "test_file");
std::unique_ptr<WritableFile> writable_file_temp;
TF_ASSERT_OK(memmapped_env.NewAppendableFile(filename, &writable_file_temp));
const auto adh = [&memmapped_env, &filename](WritableFile* f) {
delete f;
TF_CHECK_OK(memmapped_env.DeleteFile(filename));
};
std::unique_ptr<WritableFile, decltype(adh)> writable_file(
writable_file_temp.release(), adh);
const string test_string = "bla-bla-bla";
TF_ASSERT_OK(writable_file->Append(test_string));
TF_ASSERT_OK(writable_file->Close());
uint64 file_length = 0;
TF_EXPECT_OK(memmapped_env.GetFileSize(filename, &file_length));
EXPECT_EQ(test_string.length(), file_length);
FileStatistics stat;
TF_EXPECT_OK(memmapped_env.Stat(filename, &stat));
EXPECT_EQ(test_string.length(), stat.length);
std::unique_ptr<RandomAccessFile> random_access_file;
TF_ASSERT_OK(
memmapped_env.NewRandomAccessFile(filename, &random_access_file));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/memmapped_file_system.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/memmapped_file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
934c29ae-afea-4099-b759-9b5f96655cef | cpp | tensorflow/tensorflow | stat_summarizer | tensorflow/core/util/stat_summarizer.cc | tensorflow/core/util/stat_summarizer_test.cc | #include "tensorflow/core/util/stat_summarizer.h"
#include <iomanip>
#include <map>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using Detail = StatsCalculator::Detail;
StatSummarizer::StatSummarizer(const StatSummarizerOptions& options)
: stats_calculator_(new StatsCalculator(options)) {}
StatSummarizer::StatSummarizer(const tensorflow::GraphDef& tensorflow_graph)
: stats_calculator_(new StatsCalculator(StatSummarizerOptions())) {}
StatSummarizer::~StatSummarizer() = default;
void StatSummarizer::Validate(const std::vector<TensorDescription>* outputs,
const NodeExecStats& ns) const {
if (outputs->size() != ns.output_size()) {
LOG(WARNING) << "Number of outputs changed between runs for '"
<< ns.node_name() << "' - was " << outputs->size() << ", now "
<< ns.output_size();
} else {
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
const auto& stored = (*outputs)[slot];
const auto& current = output.tensor_description();
bool do_tensors_match =
(stored.dtype() == current.dtype()) &&
(stored.shape().dim_size() == current.shape().dim_size());
if (do_tensors_match) {
for (int i = 0; i < stored.shape().dim_size(); ++i) {
if (stored.shape().dim(i).size() != current.shape().dim(i).size()) {
do_tensors_match = false;
break;
}
}
}
if (!do_tensors_match) {
LOG(WARNING) << "Output tensor changed between runs for '"
<< ns.node_name();
}
}
}
}
void StatSummarizer::PrintStepStats() const {
string output = GetOutputString();
std::istringstream iss(output);
for (std::string line; std::getline(iss, line);) {
LOG(INFO) << line;
}
}
namespace {
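// Recovers the op type from a NodeExecStats timeline label of the form
// "<node_name> = <OpType>(<inputs>)". Stream and memcpy device entries carry
// no such label, so they yield "<>".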
std::string OpType(const DeviceStepStats& ds, const NodeExecStats& ns) {
if (absl::StrContains(ds.device(), "/stream") ||
absl::StrContains(ds.device(), "/memcpy")) {
return "<>";
}
const std::string sep(" = ");
const std::string& label = ns.timeline_label();
std::string::size_type start = label.find(sep);
if (start == std::string::npos) return "<>";
start += sep.size();
std::string::size_type end = label.find('(', start);
if (end == std::string::npos) return "<>";
return label.substr(start, end - start);
}
}
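// Flattens a StepStats proto into per-node run time and memory entries.
// Per-stream GPU entries other than "/stream:all" and "/host:CPU" entries are
// skipped; kernel and memcpy records are renamed "<node> [Kernel]" and
// "<node> [MemCpy]" with a "gpu:<suffix>" op type.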
void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
int64_t curr_total_us = 0;
int64_t mem_total = 0;
int node_num = 0;
for (const auto& ds : step_stats.dev_stats()) {
for (const auto& ns : ds.node_stats()) {
if (absl::StrContains(ds.device(), "/stream") &&
!absl::StrContains(ds.device(), "/stream:all")) {
continue;
}
if (absl::StrContains(ds.device(), "/host:CPU")) {
continue;
}
std::string name = ns.node_name();
std::string op_type = "<>";
if (absl::StrContains(ds.device(), "/stream")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2) {
name = parts[0] + " [Kernel]";
op_type = "gpu:" + parts[1];
}
} else if (absl::StrContains(ds.device(), "/memcpy")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2 || parts.size() == 3) {
name = parts.front() + " [MemCpy]";
op_type = "gpu:" + parts.back();
}
} else {
op_type = OpType(ds, ns);
}
++node_num;
const int64_t curr_time = ns.all_end_rel_micros();
curr_total_us += curr_time;
auto output_result =
outputs_.emplace(name, std::vector<TensorDescription>());
std::vector<TensorDescription>* outputs = &(output_result.first->second);
int64_t rel_end_us = curr_time;
if (output_result.second) {
outputs->resize(ns.output_size());
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
(*outputs)[slot] = output.tensor_description();
}
}
int64_t curr_node_mem = 0;
for (const auto& mem : ns.memory()) {
const int64_t mem_usage = mem.total_bytes();
curr_node_mem += mem_usage;
}
stats_calculator_->AddNodeStats(name, op_type, node_num, rel_end_us,
curr_node_mem);
mem_total += curr_node_mem;
Validate(outputs, ns);
}
}
stats_calculator_->UpdateRunTotalUs(curr_total_us);
stats_calculator_->UpdateMemoryUsed(mem_total);
}
void StatSummarizer::PrintOutputs() const {
std::priority_queue<
std::pair<int64_t, const std::pair<const std::string, Detail>*>>
timings;
for (const auto& entry : stats_calculator_->GetDetails()) {
timings.emplace(-entry.second.run_order, &entry);
}
LOG(INFO) << "============ Node output tensor sizes in run order ========";
while (!timings.empty()) {
auto entry = timings.top();
timings.pop();
std::stringstream stream;
const auto detail_outputs = outputs_.at(entry.second->first);
stream << entry.second->first << "\t" << detail_outputs.size();
for (const auto& tensor : detail_outputs) {
stream << "\t" << DataTypeString(tensor.dtype());
stream << "\t" << tensor.shape().dim_size();
for (const auto& d : tensor.shape().dim()) {
stream << "\t" << d.size();
}
}
LOG(INFO) << stream.str();
}
}
} | #include "tensorflow/core/util/stat_summarizer.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(StatSummarizerTest, ExtractsOpTypes) {
const std::string graph_def_str(R"EOF(
node {
name: "myconstant"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
versions {
producer: 21
}
)EOF");
GraphDef graph_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(graph_def_str, &graph_def));
std::unique_ptr<Session> session(NewSession(SessionOptions()));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph_def));
RunOptions run_options;
run_options.set_trace_level(RunOptions::FULL_TRACE);
RunMetadata run_metadata;
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(run_options, {}, {"myconstant:0"}, {}, &outputs,
&run_metadata));
StatSummarizer stats(graph_def);
stats.ProcessStepStats(run_metadata.step_stats());
const std::string output = stats.GetOutputString();
const std::string by_node_type = stats.GetStatsByNodeType();
ASSERT_TRUE(absl::StrContains(output, "Const")) << output;
ASSERT_TRUE(absl::StrContains(output, "myconstant")) << output;
ASSERT_TRUE(absl::StrContains(by_node_type, "Const")) << by_node_type;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/stat_summarizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/stat_summarizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05402f47-5603-4c59-b33b-796f157484b4 | cpp | tensorflow/tensorflow | dump_graph | tensorflow/compiler/mlir/tensorflow/utils/dump_graph.cc | tensorflow/compiler/mlir/tensorflow/utils/dump_graph_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
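// Adapts a tensorflow::WritableFile to llvm::raw_ostream so MLIR can print
// directly into the dump file. Writes are unbuffered; a failed Append logs a
// warning and drops subsequent writes.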
struct WritableFileRawStream : public llvm::raw_ostream {
explicit WritableFileRawStream(WritableFile* file) : file(file) {
SetUnbuffered();
}
~WritableFileRawStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
if (file) {
Status s = file->Append(StringPiece(ptr, size));
if (!s.ok()) {
LOG(WARNING) << "Write failed: " << s;
file = nullptr;
}
}
}
WritableFile* file;
};
}
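// Imports `graph` (and a function library) into an MLIR module in the dialect
// selected by `config`, verifies it, and prints the textual IR to `file`
// using the configured printing flags.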
Status DumpTextualIRToFile(const MlirDumpConfig& config, const Graph& graph,
const FunctionLibraryDefinition* flib_def,
WritableFile* file) {
WritableFileRawStream os(std::move(file));
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module;
if (flib_def) {
flib_def = &graph.flib_def();
}
auto convert = [&]() -> Status {
mlir::StatusScopedDiagnosticHandler status_handler(&context);
GraphDebugInfo debug_info;
switch (config.dialect) {
case MlirDumpConfig::Dialect::kTFG: {
TF_ASSIGN_OR_RETURN(module,
mlir::tfg::ImportGraphAndFunctionsToMlir(
&context, debug_info, graph,
flib_def ? *flib_def : graph.flib_def()));
break;
}
}
if (failed(mlir::verify(*module))) {
return status_handler.ConsumeStatus();
}
return status_handler.ConsumeStatus();
};
TF_RETURN_IF_ERROR(convert());
module->print(os, config.op_printing_flags);
return absl::OkStatus();
}
void UseMlirForGraphDump(const MlirDumpConfig& config) {
SetGraphDumper(
[config](const Graph& graph, const FunctionLibraryDefinition* flib_def,
WritableFile* file) -> Status {
return DumpTextualIRToFile(config, graph, flib_def, file);
},
".mlir");
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
void ExpectHasSubstr(const string& s, const string& expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectHasNoSubstr(const string& s, const string& expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' should not contain '" << expected << "'";
}
class StringWritableFile : public WritableFile {
public:
explicit StringWritableFile(string* str) : str_(*str) {}
Status Append(StringPiece data) override {
absl::StrAppend(&str_, data);
return absl::OkStatus();
}
Status Close() override { return absl::OkStatus(); }
Status Flush() override { return absl::OkStatus(); }
Status Name(StringPiece* result) const override {
*result = "(string)";
return absl::OkStatus();
}
Status Sync() override { return absl::OkStatus(); }
Status Tell(int64_t* position) override {
return errors::Unimplemented("Stream not seekable");
}
private:
string& str_;
};
TEST(Dump, TextualIrToFileSuccess) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
UseMlirForGraphDump(MlirDumpConfig());
string ret = DumpGraphToFile("tir", graph);
ASSERT_EQ(ret, io::JoinPath(testing::TmpDir(), "tir.mlir"));
string actual;
TF_ASSERT_OK(ReadFileToString(Env::Default(), ret, &actual));
}
TEST(Dump, TextualIrWithOptions) {
Graph graph(OpRegistry::Global());
Node* node;
TF_ASSERT_OK(NodeBuilder("A", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &node));
string actual;
StringWritableFile file(&actual);
TF_ASSERT_OK(DumpTextualIRToFile(MlirDumpConfig().emit_location_information(),
graph, nullptr, &file));
string expected_substr = R"(loc(#loc))";
ExpectHasSubstr(actual, expected_substr);
}
TEST(Dump, DumpToTFG) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
string actual;
StringWritableFile file(&actual);
TF_ASSERT_OK(DumpTextualIRToFile(
MlirDumpConfig().emit_dialect(MlirDumpConfig::Dialect::kTFG), graph,
nullptr, &file));
string expected_substr("tfg.graph");
ExpectHasSubstr(actual, expected_substr);
string not_expected_substr("tf_executor.island");
ExpectHasNoSubstr(actual, not_expected_substr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d9b2f2f-d60b-4eec-b8fb-be7dca0e0400 | cpp | tensorflow/tensorflow | bad_indices_policy | tensorflow/core/util/bad_indices_policy.cc | tensorflow/core/util/bad_indices_policy_test.cc | #include "tensorflow/core/util/bad_indices_policy.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
constexpr char kDefault[] = "DEFAULT";
constexpr char kErrorStr[] = "ERROR";
constexpr char kIgnoreStr[] = "IGNORE";
absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
absl::string_view str) {
if (str.empty()) return BadIndicesPolicy::kDefault;
if (str == kDefault) return BadIndicesPolicy::kDefault;
if (str == kErrorStr) return BadIndicesPolicy::kError;
if (str == kIgnoreStr) return BadIndicesPolicy::kIgnore;
return absl::InvalidArgumentError(
absl::StrCat("Unknown bad indices handling attribute: ", str));
}
} | #include "tensorflow/core/util/bad_indices_policy.h"
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kDefault = "DEFAULT";
constexpr absl::string_view kErrorStr = "ERROR";
constexpr absl::string_view kIgnoreStr = "IGNORE";
class BadIndicesPolicyFromStringTest : public ::testing::Test {
protected:
void TestValidInput(absl::string_view input, BadIndicesPolicy expected) {
absl::StatusOr<BadIndicesPolicy> result = BadIndicesPolicyFromString(input);
ASSERT_TRUE(result.ok());
EXPECT_EQ(result.value(), expected);
}
};
TEST_F(BadIndicesPolicyFromStringTest, EmptyString) {
TestValidInput("", BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, DefaultKeyword) {
TestValidInput(kDefault, BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, ErrorKeyword) {
TestValidInput(kErrorStr, BadIndicesPolicy::kError);
}
TEST_F(BadIndicesPolicyFromStringTest, IgnoreKeyword) {
TestValidInput(kIgnoreStr, BadIndicesPolicy::kIgnore);
}
TEST_F(BadIndicesPolicyFromStringTest, InvalidInput) {
absl::StatusOr<BadIndicesPolicy> result =
BadIndicesPolicyFromString("unknown");
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("Unknown bad indices handling attribute"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bad_indices_policy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bad_indices_policy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71d031db-472c-419f-8962-e94574569eb3 | cpp | tensorflow/tensorflow | incremental_barrier | tensorflow/core/util/incremental_barrier.cc | tensorflow/core/util/incremental_barrier_test.cc | #include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include <functional>
#include <utility>
#include "absl/functional/bind_front.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
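// Heap-allocated state shared by all callbacks minted from one
// IncrementalBarrier. left_ counts outstanding callbacks (starting at 1 for
// the barrier object itself); when it drops to zero the done callback runs
// and the object deletes itself.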
class InternalIncrementalBarrier {
public:
explicit InternalIncrementalBarrier(IncrementalBarrier::DoneCallback callback)
: left_(1), done_callback_(std::move(callback)) {}
void operator()() {
DCHECK_GE(left_.load(std::memory_order_relaxed), 0);
if (left_.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0) {
IncrementalBarrier::DoneCallback done_callback =
std::move(done_callback_);
delete this;
done_callback();
}
}
IncrementalBarrier::BarrierCallback Inc() {
left_.fetch_add(1, std::memory_order_acq_rel);
return absl::bind_front(&InternalIncrementalBarrier::operator(), this);
}
private:
std::atomic<int> left_;
IncrementalBarrier::DoneCallback done_callback_;
};
IncrementalBarrier::IncrementalBarrier(DoneCallback done_callback)
: internal_barrier_(
new InternalIncrementalBarrier(std::move(done_callback))) {}
IncrementalBarrier::~IncrementalBarrier() { (*internal_barrier_)(); }
IncrementalBarrier::BarrierCallback IncrementalBarrier::Inc() {
return internal_barrier_->Inc();
}
} | #include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include "absl/functional/bind_front.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace {
class Counter {
public:
void Increment() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
++count_;
}
int GetCount() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return count_;
}
private:
mutex mu_;
int count_ = 0;
};
TEST(IncrementalBarrierTest, RunInstantlyWhenZeroClosure) {
Counter counter;
EXPECT_EQ(counter.GetCount(), 0);
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
EXPECT_EQ(counter.GetCount(), 0);
}
EXPECT_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresOneNowTwoLater) {
Counter counter;
IncrementalBarrier::BarrierCallback bc1, bc2;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
bc1 = barrier.Inc();
bc2 = barrier.Inc();
IncrementalBarrier::BarrierCallback bc3 = barrier.Inc();
bc3();
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(counter.GetCount(), 0);
bc1();
CHECK_EQ(counter.GetCount(), 0);
bc2();
CHECK_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresConcurrency) {
const int num_closure = 100, num_thread = 2;
std::atomic<int> schedule_count{0};
Counter counter;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
tensorflow::thread::ThreadPool pool(tensorflow::Env::Default(),
"BarrierClosure", num_thread);
for (int i = 0; i < num_closure; ++i) {
pool.Schedule([&barrier, &schedule_count]() {
schedule_count.fetch_add(1);
IncrementalBarrier::BarrierCallback bc = barrier.Inc();
Env::Default()->SleepForMicroseconds(100);
bc();
});
}
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(schedule_count.load(std::memory_order_relaxed), 100);
CHECK_EQ(counter.GetCount(), 1);
}
#if defined(PLATFORM_GOOGLE)
void BM_FunctionInc(benchmark::State& state) {
IncrementalBarrier barrier([] {});
for (auto _ : state) {
barrier.Inc()();
}
}
BENCHMARK(BM_FunctionInc);
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4d56199-05c4-4fc6-ac2b-3999333517b7 | cpp | tensorflow/tensorflow | tensor_slice_set | tensorflow/core/util/tensor_slice_set.cc | tensorflow/core/util/tensor_slice_set_test.cc | #include "tensorflow/core/util/tensor_slice_set.h"
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
TensorSliceSet::TensorSliceSet(const TensorShape& shape, DataType type)
: shape_(shape), type_(type) {}
TensorSliceSet::~TensorSliceSet() = default;
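// Registers a slice under the given tag. Overlapping slices are rejected;
// slices_hull_ keeps a bounding slice so non-overlapping inserts can usually
// skip the per-slice scan.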
Status TensorSliceSet::Register(const TensorSlice& slice, const string& tag) {
TensorShape result_shape;
TF_RETURN_IF_ERROR(slice.SliceTensorShape(shape_, &result_shape));
string str = slice.DebugString();
if (slices_.empty()) {
slices_hull_ = slice;
} else {
if (slices_hull_.Overlaps(slice)) {
for (const auto& x : slices_) {
if (slice.Overlaps(x.second.slice)) {
return errors::Internal("Overlapping slices: existing slice = ",
x.first, ", new slice = ", str);
}
}
}
slices_hull_.UpdateToCover(slice);
}
TensorSliceSet::SliceInfo info = {slice, tag, result_shape.num_elements()};
slices_.insert(std::make_pair(str, info));
return absl::OkStatus();
}
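// Returns the registered (slice, tag) pairs needed to materialize the
// requested slice. An exact match yields a single entry; otherwise every
// intersecting slice is returned, and the query succeeds only if those
// slices fully cover the request.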
bool TensorSliceSet::QueryMeta(
const TensorSlice& slice,
std::vector<std::pair<TensorSlice, string>>* results) const {
results->clear();
Status s;
string str = slice.DebugString();
const TensorSliceSet::SliceInfo* info = gtl::FindOrNull(slices_, str);
if (info) {
results->emplace_back(std::make_pair(info->slice, info->tag));
return true;
} else {
TensorShape target_shape;
Status s;
s = slice.SliceTensorShape(shape_, &target_shape);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
int64_t total_size = target_shape.num_elements();
int64_t overlap_size = 0;
TensorSlice intersection;
TensorShape inter_shape;
for (const auto& x : slices_) {
if (slice.Intersect(x.second.slice, &intersection)) {
s = intersection.SliceTensorShape(shape_, &inter_shape);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
overlap_size += inter_shape.num_elements();
results->emplace_back(std::make_pair(x.second.slice, x.second.tag));
}
}
if (total_size == overlap_size) {
return true;
} else {
results->clear();
return false;
}
}
}
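// Finds or creates the TensorSliceSet for `name`, verifies the shape and
// dtype agree with earlier registrations, then registers the slice.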
Status RegisterTensorSlice(
const string& name, const TensorShape& shape, DataType type,
const string& tag, const TensorSlice& slice,
std::unordered_map<string, TensorSliceSet*>* tensor_slices) {
DCHECK_NE(tensor_slices, nullptr);
TensorSliceSet* tss = gtl::FindPtrOrNull(*tensor_slices, name);
if (!tss) {
tss = new TensorSliceSet(shape, type);
tensor_slices->insert(std::make_pair(name, tss));
} else {
const TensorShape& tss_shape(tss->shape());
if (!shape.IsSameSize(tss_shape)) {
return errors::Internal("Incompatible tensor shapes detected for tensor ",
name, ": existing = ", tss_shape.DebugString(),
", new = ", shape.DebugString());
}
if (type != tss->type()) {
return errors::Internal("Incompatible tensor types detected for tensor ",
name,
": existing = ", DataTypeString(tss->type()),
", new = ", DataTypeString(type));
}
}
return tss->Register(slice, tag);
}
}
} | #include "tensorflow/core/util/tensor_slice_set.h"
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace checkpoint {
namespace {
TEST(TensorSliceSetTest, QueryMetaTwoD) {
TensorShape shape({4, 5});
TensorSliceSet tss(shape, DT_INT32);
TensorSlice slice_1 = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(tss.Register(slice_1, "slice_1"));
TensorSlice slice_2 = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(tss.Register(slice_2, "slice_2"));
TensorSlice slice_3 = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(tss.Register(slice_3, "slice_3"));
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(1, results.size());
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(1, results.size());
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:0,3");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(2, results.size());
if (results[0].second == "slice_2") {
EXPECT_EQ("2,2:0,3", results[0].first.DebugString());
EXPECT_EQ("slice_2", results[0].second);
EXPECT_EQ("0,2:-", results[1].first.DebugString());
EXPECT_EQ("slice_1", results[1].second);
} else {
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
EXPECT_EQ("2,2:0,3", results[1].first.DebugString());
EXPECT_EQ("slice_2", results[1].second);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_FALSE(tss.QueryMeta(s, &results));
EXPECT_EQ(0, results.size());
}
}
static void BM_RegisterOneByOne(::testing::benchmark::State& state) {
TensorShape shape({static_cast<int>(state.max_iterations), 41});
TensorSliceSet slice_set(shape, DT_INT32);
int i = 0;
for (auto s : state) {
TensorSlice part({{i, 1}, {0, -1}});
TF_CHECK_OK(slice_set.Register(part, part.DebugString()));
++i;
}
}
BENCHMARK(BM_RegisterOneByOne);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_set.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_set_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a1745d7-8808-458a-b1a5-20bfab632659 | cpp | tensorflow/tensorflow | ragged_to_dense_util | tensorflow/core/util/ragged_to_dense_util.cc | tensorflow/core/util/ragged_to_dense_util_test.cc | #include "tensorflow/core/util/ragged_to_dense_util.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace tensorflow {
using errors::InvalidArgument;
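// Converts the row_partition_types attribute strings to enum values,
// reporting the first string that cannot be converted.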
tensorflow::Status GetRowPartitionTypesHelper(
const std::vector<string>& row_partition_type_strings,
std::vector<RowPartitionType>* row_partition_types) {
*row_partition_types = GetRowPartitionTypesHelper(row_partition_type_strings);
if (row_partition_types->size() != row_partition_type_strings.size()) {
return InvalidArgument(
"Unknown string for partition info type: ",
row_partition_type_strings.at(row_partition_types->size()));
}
return absl::OkStatus();
}
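// Merges the static `shape` attribute with what is known from the ragged rank
// and the flat-values shape to produce the output dense shape; unknown
// dimensions stay -1 and conflicting known dimensions are rejected.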
tensorflow::Status CombineRaggedTensorToTensorShapes(
int ragged_rank, const TensorShapeProto& shape,
const TensorShapeProto& value_shape, TensorShapeProto* output_shape) {
if (value_shape.unknown_rank() && shape.unknown_rank()) {
output_shape->Clear();
output_shape->set_unknown_rank(true);
return absl::OkStatus();
}
if (shape.unknown_rank()) {
while (output_shape->dim_size() < ragged_rank + value_shape.dim_size()) {
output_shape->add_dim()->set_size(-1);
}
} else {
*output_shape = shape;
}
if (value_shape.unknown_rank()) {
return absl::OkStatus();
}
if (ragged_rank + value_shape.dim_size() != output_shape->dim_size()) {
return InvalidArgument(
"rt_input.shape and shape=", TensorShape::DebugString(shape),
" are incompatible: rt_input.rank = ",
ragged_rank + value_shape.dim_size(),
" but shape.rank = ", output_shape->dim_size());
}
for (int i = 1; i < value_shape.dim_size(); ++i) {
const TensorShapeProto::Dim& value_dim = value_shape.dim(i);
TensorShapeProto::Dim* output_shape_dim = output_shape->mutable_dim(
output_shape->dim_size() - value_shape.dim_size() + i);
if (value_dim.size() >= 0) {
if (output_shape_dim->size() >= 0) {
if (output_shape_dim->size() != value_dim.size()) {
return InvalidArgument(
"rt_input.shape and shape=", TensorShape::DebugString(shape),
" are incompatible: rt_input.shape[", i + ragged_rank,
"] = ", value_dim.size(), " but shape[", i + ragged_rank,
"] = ", output_shape_dim->size());
}
} else {
output_shape_dim->set_size(value_dim.size());
}
}
}
return absl::OkStatus();
}
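// Checks that default_value is broadcast-compatible with the flat values: its
// rank must be strictly smaller, and each of its dimensions must be 1 or match
// the corresponding flat-values dimension (offset past the outer row dim).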
tensorflow::Status ValidateDefaultValueShape(
const TensorShapeProto& default_value_shape,
const TensorShapeProto& value_shape) {
if (default_value_shape.unknown_rank() || value_shape.unknown_rank()) {
return absl::OkStatus();
}
int default_ndims = default_value_shape.dim_size();
int values_ndims = value_shape.dim_size();
if (default_ndims >= values_ndims) {
return InvalidArgument(
"default_value.shape=", TensorShape::DebugString(default_value_shape),
" and rt_input.flat_values.shape=",
TensorShape::DebugString(value_shape),
" are incompatible: default_value.rank = ", default_ndims,
" must be less than rt_input.flat_values.rank = ", values_ndims);
}
for (int i = 0; i < std::min(default_ndims, values_ndims - 1); ++i) {
int default_dim = default_value_shape.dim(i).size();
int value_dim = value_shape.dim(i + 1).size();
if (default_dim >= 0 && value_dim >= 0 && default_dim != 1 &&
default_dim != value_dim) {
return InvalidArgument(
"default_value.shape=", TensorShape::DebugString(default_value_shape),
" and rt_input.flat_values.shape=",
TensorShape::DebugString(value_shape),
" are incompatible: default_value.shape[",
i - default_value_shape.dim_size(), "] = ", default_dim,
" but rt_input.flat_values.shape[",
i - default_value_shape.dim_size(), "] = ", value_dim);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/ragged_to_dense_util.h"
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeUnknownValue) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.set_unknown_rank(true);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
EXPECT_EQ(true, actual_output_shape_proto.unknown_rank());
}
TEST(CombineRaggedTensorToTensorShapes, UnknownShape) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
ASSERT_EQ(actual_output_shape_proto.dim_size(), 2);
EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
}
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeDenseValue) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
value_shape_proto.add_dim()->set_size(3);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
ASSERT_EQ(actual_output_shape_proto.dim_size(), 3);
EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(2).size(), 3);
}
TEST(GetRowPartitionTypesHelper, BasicTest) {
const std::vector<string> row_partition_type_strings = {
"FIRST_DIM_SIZE", "VALUE_ROWIDS", "ROW_SPLITS"};
std::vector<RowPartitionType> row_partition_types;
TF_ASSERT_OK(GetRowPartitionTypesHelper(row_partition_type_strings,
&row_partition_types));
EXPECT_THAT(row_partition_types,
::testing::ElementsAre(RowPartitionType::FIRST_DIM_SIZE,
RowPartitionType::VALUE_ROWIDS,
RowPartitionType::ROW_SPLITS));
}
TEST(RowPartitionTypeToString, BasicTest) {
EXPECT_EQ("FIRST_DIM_SIZE",
RowPartitionTypeToString(RowPartitionType::FIRST_DIM_SIZE));
EXPECT_EQ("VALUE_ROWIDS",
RowPartitionTypeToString(RowPartitionType::VALUE_ROWIDS));
EXPECT_EQ("ROW_SPLITS",
RowPartitionTypeToString(RowPartitionType::ROW_SPLITS));
}
TEST(ValidateDefaultValueShape, UnknownDefaultValueShape) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, UnknownValueShape) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(5);
TensorShapeProto value_shape_proto;
value_shape_proto.set_unknown_rank(true);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, ScalarShape) {
TensorShapeProto default_value_shape_proto;
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorShapeEqual) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(2);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(2);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionUnknown) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(2);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionUnknownForValue) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(2);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionFewDims) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, WrongNumberOfDimensions) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(-1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(-1);
EXPECT_FALSE(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
.ok());
}
TEST(ValidateDefaultValueShape, WrongDimensionSize) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
default_value_shape_proto.add_dim()->set_size(-1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(6);
value_shape_proto.add_dim()->set_size(-1);
EXPECT_FALSE(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
.ok());
}
TEST(ValidateDefaultValueShape, WrongDimensionSizeBut1) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
default_value_shape_proto.add_dim()->set_size(1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(3);
value_shape_proto.add_dim()->set_size(7);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ragged_to_dense_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ragged_to_dense_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35055de0-39d9-4889-8071-f4779761f1e2 | cpp | tensorflow/tensorflow | example_proto_helper | tensorflow/core/util/example_proto_helper.cc | tensorflow/core/util/example_proto_helper_test.cc | #include "tensorflow/core/util/example_proto_helper.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
Status CheckValidType(const DataType& dtype) {
switch (dtype) {
case DT_INT64:
case DT_FLOAT:
case DT_STRING:
return absl::OkStatus();
default:
return errors::InvalidArgument("Received input dtype: ",
DataTypeString(dtype));
}
}
Status CheckTypesMatch(const Feature& feature, const DataType& dtype,
bool* match) {
switch (dtype) {
case DT_INT64:
*match = (feature.kind_case() == Feature::kInt64List);
break;
case DT_FLOAT:
*match = (feature.kind_case() == Feature::kFloatList);
break;
case DT_STRING:
*match = (feature.kind_case() == Feature::kBytesList);
break;
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
return absl::OkStatus();
}
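// Copies one Feature's values into row `out_index` of the pre-allocated dense
// output tensor, checking that the value count matches the declared shape.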
Status FeatureDenseCopy(const std::size_t out_index, const string& name,
const string& key, const DataType& dtype,
const TensorShape& shape, const Feature& feature,
Tensor* out) {
const std::size_t num_elements = shape.num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of int64 values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<int64_t>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of float values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<float>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key ", key, ", Index: ", out_index,
". Number of bytes values != expected. "
"Values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<tstring>().data() + offset;
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return absl::OkStatus();
}
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
}
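// Copies a Feature's values into a freshly allocated 1-D tensor sized to the
// number of values present; used to stage per-example VarLen feature values.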
Tensor FeatureSparseCopy(const std::size_t batch, const string& key,
const DataType& dtype, const Feature& feature) {
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<int64_t>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<float>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<tstring>().data();
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return out;
}
default:
LOG(FATAL) << "not supposed to be here. dtype requested: " << dtype;
}
}
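// Appends one example's sparse values into the batched values tensor at
// `offset`, writing a [batch, position] pair per value into `indices`.
// Returns the number of elements copied.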
int64_t CopyIntoSparseTensor(const Tensor& in, const int batch,
const int64_t offset, Tensor* indices,
Tensor* values) {
const int64_t num_elements = in.shape().num_elements();
const DataType& dtype = in.dtype();
CHECK_EQ(dtype, values->dtype());
if (num_elements > 0) {
auto ix_t = indices->matrix<int64_t>();
int64_t* ix_p = &ix_t(offset, 0);
for (int64_t i = 0; i < num_elements; ++i, ix_p += 2) {
*ix_p = batch;
*(ix_p + 1) = i;
}
}
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
values->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
values->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
values->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
return num_elements;
}
void RowDenseCopy(const std::size_t& out_index, const DataType& dtype,
const Tensor& in, Tensor* out) {
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
out->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
}
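// Parses a single Example: fixed-length features are copied (or defaulted)
// into row `batch_index` of the dense output tensors, and var-length features
// are staged as per-example tensors in `output_sparse_values_tmp`.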
Status SingleExampleProtoToTensors(
const Example& example, const string& example_name, const int batch_index,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features,
std::vector<Tensor*>* output_dense_values_tensor,
std::vector<std::vector<Tensor>>* output_sparse_values_tmp) {
const Features& features = example.features();
const auto& feature_dict = features.feature();
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& feature_config = fixed_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const TensorShape& shape = feature_config.shape;
const Tensor& default_value = feature_config.default_value;
bool required = (default_value.NumElements() == 0);
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
const bool required_ok = feature_has_data || !required;
if (!required_ok) {
return errors::InvalidArgument("Name: ", example_name, ", Feature: ", key,
" is required but could not be found.");
}
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
TF_RETURN_IF_ERROR(FeatureDenseCopy(batch_index, example_name, key, dtype,
shape, f,
(*output_dense_values_tensor)[d]));
} else {
RowDenseCopy(batch_index, dtype, default_value,
(*output_dense_values_tensor)[d]);
}
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
(*output_sparse_values_tmp)[d][batch_index] =
FeatureSparseCopy(batch_index, key, dtype, f);
} else {
(*output_sparse_values_tmp)[d][batch_index] =
Tensor(dtype, TensorShape({0}));
}
}
return absl::OkStatus();
}
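// Sizes the batched sparse outputs for one VarLen feature: the total element
// count across the batch drives the indices/values shapes, and the longest
// per-example row becomes max_num_features.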
Status GetSparseTensorShapes(const VarLenFeature& var_len_feature,
const std::vector<Tensor>& sparse_values_tmp,
const int batch_size,
VarLenFeatureBatchShapes* output_shapes) {
int64_t total_num_features = 0;
int64_t max_num_features = 0;
for (int b = 0; b < batch_size; ++b) {
const Tensor& t = sparse_values_tmp[b];
const int64_t num_elements = t.shape().num_elements();
total_num_features += num_elements;
max_num_features = std::max(max_num_features, num_elements);
}
output_shapes->indices_shape.AddDim(total_num_features);
output_shapes->indices_shape.AddDim(2);
output_shapes->values_shape.AddDim(total_num_features);
output_shapes->max_num_features = max_num_features;
return absl::OkStatus();
}
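// Batched parse: allocates the dense outputs, runs SingleExampleProtoToTensors
// on each Example, then allocates and fills the sparse indices/values/shape
// tensors from the staged per-example results.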
Status BatchExampleProtoToTensors(
const std::vector<const Example*>& examples,
const std::vector<string>& names,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features, Allocator* allocator,
std::vector<Tensor>* output_dense_values_tensor,
std::vector<Tensor>* output_sparse_indices_tensor,
std::vector<Tensor>* output_sparse_values_tensor,
std::vector<Tensor>* output_sparse_shapes_tensor) {
const int batch_size = examples.size();
const bool has_names = (!names.empty());
if (has_names) {
if (names.size() != examples.size()) {
return errors::InvalidArgument(
"Expected len(names) == len(examples), but got: ", names.size(),
" vs. ", examples.size());
}
}
std::vector<Tensor*> output_dense_values_tensor_ptrs(
fixed_len_features.size());
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& config = fixed_len_features[d];
TensorShape out_shape;
out_shape.AddDim(batch_size);
const TensorShape& shape = config.shape;
const DataType& dtype = config.dtype;
for (const int dim : shape.dim_sizes()) out_shape.AddDim(dim);
(*output_dense_values_tensor)[d] = Tensor(allocator, dtype, out_shape);
output_dense_values_tensor_ptrs[d] = &(*output_dense_values_tensor)[d];
}
std::vector<std::vector<Tensor>> sparse_values_tmp(var_len_features.size());
for (size_t d = 0; d < var_len_features.size(); ++d) {
sparse_values_tmp[d] = std::vector<Tensor>(batch_size);
}
for (size_t b = 0; b < examples.size(); ++b) {
const Example& ex = *(examples[b]);
const string& example_name = (has_names) ? names[b] : "<unknown>";
TF_RETURN_IF_ERROR(SingleExampleProtoToTensors(
ex, example_name, b, fixed_len_features, var_len_features,
&output_dense_values_tensor_ptrs, &sparse_values_tmp));
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const DataType& dtype = feature_config.dtype;
const std::vector<Tensor>& sparse_values_tensor = sparse_values_tmp[d];
VarLenFeatureBatchShapes sparse_tensor_batch_shapes;
TF_RETURN_IF_ERROR(GetSparseTensorShapes(feature_config,
sparse_values_tensor, batch_size,
&sparse_tensor_batch_shapes));
const TensorShape& indices_shape = sparse_tensor_batch_shapes.indices_shape;
const TensorShape& values_shape = sparse_tensor_batch_shapes.values_shape;
(*output_sparse_indices_tensor)[d] =
Tensor(allocator, DT_INT64, indices_shape);
(*output_sparse_values_tensor)[d] = Tensor(allocator, dtype, values_shape);
(*output_sparse_shapes_tensor)[d] =
Tensor(allocator, DT_INT64, TensorShape({2}));
auto shape_t = (*output_sparse_shapes_tensor)[d].vec<int64_t>();
shape_t(0) = batch_size;
shape_t(1) = sparse_tensor_batch_shapes.max_num_features;
Tensor* sp_indices_d = &(*output_sparse_indices_tensor)[d];
Tensor* sp_values_d = &(*output_sparse_values_tensor)[d];
int64_t offset = 0;
for (int b = 0; b < batch_size; ++b) {
const int64_t num_elements = CopyIntoSparseTensor(
sparse_values_tensor[b], b, offset, sp_indices_d, sp_values_d);
offset += num_elements;
}
}
return absl::OkStatus();
}
Status ParseExampleAttrs::FinishInit(int op_version) {
switch (op_version) {
case 1:
num_ragged = 0;
break;
case 2:
num_dense = dense_types.size();
num_ragged = ragged_value_types.size();
break;
default:
return errors::InvalidArgument("Unexpected op_version", op_version);
}
if (static_cast<size_t>(num_sparse) != sparse_types.size()) {
return errors::InvalidArgument("len(sparse_keys) != len(sparse_types)");
}
if (static_cast<size_t>(num_dense) != dense_types.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_types)");
}
if (static_cast<size_t>(num_dense) != dense_shapes.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_shapes)");
}
if (static_cast<size_t>(num_ragged) != ragged_value_types.size()) {
return errors::InvalidArgument(
"len(ragged_keys) != len(ragged_value_types)");
}
if (static_cast<size_t>(num_ragged) != ragged_split_types.size()) {
return errors::InvalidArgument(
"len(ragged_keys) != len(ragged_split_types)");
}
if (num_dense > std::numeric_limits<int32>::max()) {
return errors::InvalidArgument("num_dense_ too large");
}
for (const DataType& type : dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid ragged_split_type: ",
DataTypeString(type));
}
}
return absl::OkStatus();
}
Status ParseSingleExampleAttrs::FinishInit() {
if (sparse_keys.size() != sparse_types.size()) {
return errors::InvalidArgument("len(sparse_keys) != len(sparse_types)");
}
if (dense_keys.size() != dense_types.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_types)");
}
if (dense_keys.size() != dense_shapes.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_shapes)");
}
for (const DataType& type : dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
return absl::OkStatus();
}
Status ParseSequenceExampleAttrs::FinishInit(int op_version) {
switch (op_version) {
case 1:
num_context_ragged = 0;
num_feature_list_ragged = 0;
if (num_context_sparse != context_sparse_keys.size()) {
return errors::InvalidArgument(
"num_context_sparse (", num_context_sparse,
") must match the size of context_sparse_keys (",
context_sparse_keys.size(), ")");
}
if (num_context_dense != context_dense_keys.size()) {
return errors::InvalidArgument(
"num_context_dense (", num_context_dense,
") must match the size of context_dense_keys (",
context_dense_keys.size(), ")");
}
if (num_feature_list_sparse != feature_list_sparse_keys.size()) {
return errors::InvalidArgument(
"num_feature_list_sparse (", num_feature_list_sparse,
") must match the size of feature_list_sparse_keys (",
feature_list_sparse_keys.size(), ")");
}
if (num_feature_list_dense != feature_list_dense_keys.size()) {
return errors::InvalidArgument(
"num_feature_list_dense (", num_feature_list_dense,
") must match the size of feature_list_dense_keys (",
feature_list_dense_keys.size(), ")");
}
break;
case 2:
num_context_dense = context_dense_types.size();
num_context_ragged = context_ragged_value_types.size();
num_feature_list_ragged = feature_list_ragged_value_types.size();
break;
default:
return errors::InvalidArgument("Unexpected op_version", op_version);
}
if (num_context_sparse != context_sparse_types.size()) {
return errors::InvalidArgument(
"num_context_sparse (", num_context_sparse,
") must match the size of context_sparse_types (",
context_sparse_types.size(), ")");
}
if (num_context_dense != context_dense_types.size() ||
num_context_dense != context_dense_shapes.size()) {
return errors::InvalidArgument(
"num_context_dense (", num_context_dense,
") must match the size of context_dense_types (",
context_dense_types.size(), ") and context_dense_shapes (",
context_dense_shapes.size(), ")");
}
if ((num_context_ragged != context_ragged_value_types.size()) ||
(num_context_ragged != context_ragged_split_types.size())) {
return errors::InvalidArgument(
"num_context_ragged (", num_context_ragged,
") must match the size of context_ragged_value_types (",
context_ragged_value_types.size(), ") and context_ragged_split_types (",
context_ragged_split_types.size(), ")");
}
if (num_feature_list_sparse != feature_list_sparse_types.size()) {
return errors::InvalidArgument(
"num_feature_list_sparse (", num_feature_list_sparse,
") must match the size of feature_list_sparse_types (",
feature_list_sparse_types.size(), ")");
}
if (num_feature_list_dense != feature_list_dense_types.size() ||
num_feature_list_dense != feature_list_dense_shapes.size()) {
return errors::InvalidArgument(
"num_feature_list_dense (", num_feature_list_dense,
") must match the size of feature_list_dense_types (",
feature_list_dense_types.size(), ") and feature_list_dense_shapes (",
feature_list_dense_shapes.size(), ")");
}
if ((num_feature_list_ragged != feature_list_ragged_value_types.size()) ||
(num_feature_list_ragged != feature_list_ragged_split_types.size())) {
return errors::InvalidArgument(
"num_feature_list_ragged (", num_feature_list_ragged,
") must match the size of feature_list_ragged_value_types (",
feature_list_ragged_value_types.size(),
") and feature_list_ragged_split_types (",
feature_list_ragged_split_types.size(), ")");
}
for (const DataType& type : context_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid context_ragged_split_type: ",
DataTypeString(type));
}
}
for (const DataType& type : feature_list_ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid feature_list_ragged_split_type: ",
DataTypeString(type));
}
}
return absl::OkStatus();
}
Status ParseSingleSequenceExampleAttrs::FinishInit() {
if (static_cast<size_t>(num_context_sparse) != context_sparse_types.size()) {
return errors::InvalidArgument(
"len(context_sparse_keys) != len(context_sparse_types)");
}
if (static_cast<size_t>(num_context_dense) != context_dense_types.size()) {
return errors::InvalidArgument(
"len(context_dense_keys) != len(context_dense_types)");
}
if (static_cast<size_t>(num_context_dense) != context_dense_shapes.size()) {
return errors::InvalidArgument(
"len(context_dense_keys) != len(context_dense_shapes)");
}
if (static_cast<size_t>(num_feature_list_sparse) !=
feature_list_sparse_types.size()) {
return errors::InvalidArgument(
"len(feature_list_sparse_keys) != len(feature_list_sparse_types)");
}
if (static_cast<size_t>(num_feature_list_dense) !=
feature_list_dense_types.size()) {
return errors::InvalidArgument(
"len(feature_list_dense_keys) != "
"len(feature_list_dense_types)");
}
for (const DataType& type : context_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
return absl::OkStatus();
}
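// Validates the dense_shapes attribute: the rank and all inner dimensions must
// be known; a leading -1 marks a variable-length (per-example) outer dimension.
// elements_per_stride records the element count of each per-example shape.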
Status GetDenseShapes(const std::vector<PartialTensorShape>& dense_shapes,
std::vector<bool>* variable_length,
std::vector<std::size_t>* elements_per_stride) {
for (int i = 0; i < dense_shapes.size(); ++i) {
bool shape_ok = true;
if (dense_shapes[i].dims() == -1) {
shape_ok = false;
} else {
for (int d = 1; d < dense_shapes[i].dims(); ++d) {
if (dense_shapes[i].dim_size(d) == -1) {
shape_ok = false;
}
}
}
if (!shape_ok) {
return errors::InvalidArgument(
"dense_shapes[", i,
"] has unknown rank or unknown inner dimensions: ",
dense_shapes[i].DebugString());
}
TensorShape dense_shape;
if (dense_shapes[i].dims() > 0 && dense_shapes[i].dim_size(0) == -1) {
variable_length->push_back(true);
for (int d = 1; d < dense_shapes[i].dims(); ++d) {
dense_shape.AddDim(dense_shapes[i].dim_size(d));
}
} else {
variable_length->push_back(false);
dense_shapes[i].AsTensorShape(&dense_shape);
}
elements_per_stride->push_back(dense_shape.num_elements());
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/example_proto_helper.h"
#include <cstdint>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
namespace tensorflow {
namespace {
TEST(CopyIntoSparseTensorTest, String) {
Tensor in_tensor(DT_STRING, TensorShape({2}));
in_tensor.flat<tstring>()(0) = "hello";
in_tensor.flat<tstring>()(1) = "world";
int n_values = 5;
Tensor ix_tensor(DT_INT64, TensorShape({n_values, 2}));
auto ix_matrix = ix_tensor.matrix<int64_t>();
for (int i = 0; i < n_values; ++i) {
for (int j = 0; j < 2; ++j) {
ix_matrix(i, j) = 0;
}
}
Tensor value_tensor(DT_STRING, TensorShape({n_values}));
int batch = 67;
int64_t offset = 1;
auto n_elems =
CopyIntoSparseTensor(in_tensor, batch, offset, &ix_tensor, &value_tensor);
EXPECT_EQ(2, n_elems);
EXPECT_EQ(0, ix_matrix(0, 0));
EXPECT_EQ(0, ix_matrix(0, 1));
EXPECT_EQ(batch, ix_matrix(1, 0));
EXPECT_EQ(0, ix_matrix(1, 1));
EXPECT_EQ(batch, ix_matrix(2, 0));
EXPECT_EQ(1, ix_matrix(2, 1));
EXPECT_EQ(0, ix_matrix(3, 0));
EXPECT_EQ(0, ix_matrix(3, 1));
EXPECT_EQ(0, ix_matrix(4, 0));
EXPECT_EQ(0, ix_matrix(4, 1));
auto values = value_tensor.flat<tstring>();
EXPECT_EQ("", values(0));
EXPECT_EQ("hello", values(1));
EXPECT_EQ("world", values(2));
EXPECT_EQ("", values(3));
EXPECT_EQ("", values(4));
}
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
class SingleExampleProtoToTensorsTest : public ::testing::Test {
protected:
void SetUp() override {
FixedLenFeature int64_dense_config;
int64_dense_config.key = kDenseInt64Key;
int64_dense_config.dtype = DT_INT64;
int64_dense_config.shape = TensorShape({1});
int64_dense_config.default_value = Tensor(DT_INT64, TensorShape({1}));
int64_dense_config.default_value.scalar<int64_t>()() = 0;
dense_vec_.push_back(int64_dense_config);
FixedLenFeature float_dense_config;
float_dense_config.key = kDenseFloatKey;
float_dense_config.dtype = DT_FLOAT;
float_dense_config.shape = TensorShape({1});
float_dense_config.default_value = Tensor(DT_FLOAT, TensorShape({1}));
float_dense_config.default_value.scalar<float>()() = 0.0;
dense_vec_.push_back(float_dense_config);
FixedLenFeature string_dense_config;
string_dense_config.key = kDenseStringKey;
string_dense_config.dtype = DT_STRING;
string_dense_config.shape = TensorShape({1});
string_dense_config.default_value = Tensor(DT_STRING, TensorShape({1}));
string_dense_config.default_value.scalar<tstring>()() = "default";
dense_vec_.push_back(string_dense_config);
VarLenFeature int64_sparse_config;
int64_sparse_config.key = kSparseInt64Key;
int64_sparse_config.dtype = DT_INT64;
sparse_vec_.push_back(int64_sparse_config);
VarLenFeature float_sparse_config;
float_sparse_config.key = kSparseFloatKey;
float_sparse_config.dtype = DT_FLOAT;
sparse_vec_.push_back(float_sparse_config);
VarLenFeature string_sparse_config;
string_sparse_config.key = kSparseStringKey;
string_sparse_config.dtype = DT_STRING;
sparse_vec_.push_back(string_sparse_config);
}
std::vector<FixedLenFeature> dense_vec_;
std::vector<VarLenFeature> sparse_vec_;
};
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kSparseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kSparseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kSparseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(ex, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(42, int64_tensor_vec[0].vec<int64_t>()(0));
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_NEAR(4.2, float_tensor_vec[0].vec<float>()(0), 0.001);
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ("forty-two", string_tensor_vec[0].vec<tstring>()(0));
}
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyEmpty) {
Example empty;
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(empty, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(0, int64_tensor_vec[0].vec<int64_t>().size());
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_EQ(0, float_tensor_vec[0].vec<float>().size());
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ(0, string_tensor_vec[0].vec<tstring>().size());
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kDenseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kDenseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kDenseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
ex, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_TRUE(output_sparse_values_tmp.empty());
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(42, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(4.2, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("forty-two", str_dense_output.matrix<tstring>()(0, 0));
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyDefaults) {
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
Example empty;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
empty, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(0, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(0.0, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("default", str_dense_output.matrix<tstring>()(0, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
96b1761e-033d-4e9d-a447-5c2336991e90 | cpp | tensorflow/tensorflow | saved_tensor_slice_util | tensorflow/core/util/saved_tensor_slice_util.cc | tensorflow/core/util/saved_tensor_slice_util_test.cc | #include "tensorflow/core/util/saved_tensor_slice_util.h"
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/ordered_code.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace checkpoint {
const char kSavedTensorSlicesKey[] = "";
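// Encodes a (tensor name, slice) pair with OrderedCode: a leading 0, the name,
// the slice rank, then a signed (start, length) pair for each dimension.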
string EncodeTensorNameSlice(const string& name, const TensorSlice& slice) {
string buffer;
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, 0);
tensorflow::strings::OrderedCode::WriteString(&buffer, name);
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, slice.dims());
for (int d = 0; d < slice.dims(); ++d) {
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.start(d));
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.length(d));
}
return buffer;
}
Status DecodeTensorNameSlice(const string& code, string* name,
tensorflow::TensorSlice* slice) {
StringPiece src(code);
uint64 x;
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the leading number: src = ", src);
}
if (x != 0) {
return errors::Internal(
"The leading number should always be 0 for any valid key: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadString(&src, name)) {
return errors::Internal("Failed to parse the tensor name: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the tensor rank: src = ", src);
}
if (x == 0) {
return errors::Internal("Expecting positive rank of the tensor, got ", x,
", src = ", src);
}
if (x >= kint32max) {
return errors::Internal("Too many elements ", x);
}
slice->SetFullSlice(x);
for (int d = 0; d < static_cast<int32>(x); ++d) {
int64_t start, length;
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&start)) {
return errors::Internal("Failed to parse start: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&length)) {
return errors::Internal("Failed to parse length: src = ", src);
}
if (length >= 0) {
slice->set_start(d, start);
slice->set_length(d, length);
}
}
return absl::OkStatus();
}
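// Parses a "dim0 dim1 ... slice_spec" string: the final token is the
// TensorSlice, the preceding tokens are the full tensor shape, and shape_slice
// receives the shape of the sliced region.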
Status ParseShapeAndSlice(const string& shape_and_slice, TensorShape* shape,
TensorSlice* slice, TensorShape* shape_slice) {
CHECK(!shape_and_slice.empty());
std::vector<string> splits = str_util::Split(shape_and_slice, ' ');
if (splits.size() < 2) {
return errors::InvalidArgument(
"Need least two elements in shape_and_slice specification: ",
shape_and_slice);
}
slice->Clear();
auto status = slice->Parse(splits.back(), slice);
if (!status.ok()) return status;
splits.pop_back();
shape->Clear();
for (const auto& s : splits) {
int64_t dim;
if (!strings::safe_strto64(s, &dim)) {
return errors::InvalidArgument(
"Non numerical dimension in shape_and_slice: ", shape_and_slice);
}
shape->AddDim(dim);
}
return slice->SliceTensorShape(*shape, shape_slice);
}
}
} | #include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace checkpoint {
namespace {
TEST(TensorShapeUtilTest, TensorNameSliceToOrderedCode) {
{
TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
string buffer = EncodeTensorNameSlice("foo", s);
string name;
s.Clear();
TF_CHECK_OK(DecodeTensorNameSlice(buffer, &name, &s));
EXPECT_EQ("foo", name);
EXPECT_EQ("-:-:1,3:4,5", s.DebugString());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/saved_tensor_slice_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/saved_tensor_slice_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef97cfe5-51f4-4301-b684-664f141364be | cpp | tensorflow/tensorflow | tensor_format | tensorflow/core/util/tensor_format.cc | tensorflow/core/util/tensor_format_test.cc | #include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
string GetConvnetDataFormatAttrString() {
return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
}
string GetConvnet3dDataFormatAttrString() {
return "data_format: { 'NDHWC', 'NCDHW' } = 'NDHWC' ";
}
string GetConvnetDataFormat2D3DAttrString() {
return "data_format: { 'NHWC', 'NCHW', 'NDHWC', 'NCDHW' } = 'NHWC' ";
}
string GetConvnetFilterFormatAttrString() {
return "filter_format: { 'HWIO', 'OIHW' } = 'HWIO' ";
}
string GetConvnet3dFilterFormatAttrString() {
return "filter_format: { 'DHWIO', 'OIDHW' } = 'DHWIO' ";
}
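// Returns the canonical string name for a TensorFormat value (e.g.
// FORMAT_NCHW -> "NCHW"); an unknown enum value is a fatal error.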
string ToString(TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
return "NHWC";
case FORMAT_NCHW:
return "NCHW";
case FORMAT_NCHW_VECT_C:
return "NCHW_VECT_C";
case FORMAT_NHWC_VECT_W:
return "NHWC_VECT_W";
case FORMAT_HWNC:
return "HWNC";
case FORMAT_HWCN:
return "HWCN";
default:
LOG(FATAL) << "Invalid Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
string ToString(FilterTensorFormat format) {
switch (format) {
case FORMAT_HWIO:
return "HWIO";
case FORMAT_OIHW:
return "OIHW";
case FORMAT_OHWI:
return "OHWI";
case FORMAT_OIHW_VECT_I:
return "OIHW_VECT_I";
default:
LOG(FATAL) << "Invalid Filter Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
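// Parses a data-format string into a TensorFormat. The 2D and 3D spellings map
// to the same enum ("NHWC"/"NDHWC" -> FORMAT_NHWC, "NCHW"/"NCDHW" ->
// FORMAT_NCHW); returns false for unrecognized strings.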
bool FormatFromString(absl::string_view format_str, TensorFormat* format) {
if (format_str == "NHWC" || format_str == "NDHWC") {
*format = FORMAT_NHWC;
return true;
}
if (format_str == "NCHW" || format_str == "NCDHW") {
*format = FORMAT_NCHW;
return true;
}
if (format_str == "NCHW_VECT_C") {
*format = FORMAT_NCHW_VECT_C;
return true;
}
if (format_str == "NHWC_VECT_W") {
*format = FORMAT_NHWC_VECT_W;
return true;
}
if (format_str == "HWNC") {
*format = FORMAT_HWNC;
return true;
}
if (format_str == "HWCN") {
*format = FORMAT_HWCN;
return true;
}
return false;
}
bool FilterFormatFromString(absl::string_view format_str,
FilterTensorFormat* format) {
if (format_str == "HWIO" || format_str == "DHWIO") {
*format = FORMAT_HWIO;
return true;
}
if (format_str == "OIHW" || format_str == "OIDHW") {
*format = FORMAT_OIHW;
return true;
}
if (format_str == "OIHW_VECT_I") {
*format = FORMAT_OIHW_VECT_I;
return true;
}
return false;
}
} | #include <utility>
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
#define EnumStringPair(val) \
{ val, #val }
std::pair<TensorFormat, const char*> test_data_formats[] = {
EnumStringPair(FORMAT_NHWC), EnumStringPair(FORMAT_NCHW),
EnumStringPair(FORMAT_NCHW_VECT_C), EnumStringPair(FORMAT_NHWC_VECT_W),
EnumStringPair(FORMAT_HWNC), EnumStringPair(FORMAT_HWCN),
};
std::pair<FilterTensorFormat, const char*> test_filter_formats[] = {
EnumStringPair(FORMAT_HWIO),
EnumStringPair(FORMAT_OIHW),
EnumStringPair(FORMAT_OIHW_VECT_I),
};
struct TensorDimMap {
int n() const { return dim_n; }
int h() const { return dim_h; }
int w() const { return dim_w; }
int c() const { return dim_c; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_n, dim_h, dim_w, dim_c;
int spatial_dim[3];
};
struct FilterDimMap {
int h() const { return dim_h; }
int w() const { return dim_w; }
int i() const { return dim_i; }
int o() const { return dim_o; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_h, dim_w, dim_i, dim_o;
int spatial_dim[3];
};
struct DimMaps {
#define StaCoExTensorDm static constexpr TensorDimMap
StaCoExTensorDm kTdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExTensorDm kTdmNHWC[4] = { kTdmInvalid,
{ 0, -1, 1, 2, { 1, -1, -1 } },
{ 0, 1, 2, 3, { 1, 2, -1 } },
{ 0, 2, 3, 4, { 1, 2, 3 } }
};
StaCoExTensorDm kTdmNCHW[4] = { kTdmInvalid,
{ 0, -1, 2, 1, { 2, -1, -1 } },
{ 0, 2, 3, 1, { 2, 3, -1 } },
{ 0, 3, 4, 1, { 2, 3, 4 } }
};
StaCoExTensorDm kTdmHWNC[4] = { kTdmInvalid,
{ 1, -1, 0, 2, { 0, -1, -1 } },
{ 2, 0, 1, 3, { 0, 1, -1 } },
{ 3, 1, 2, 4, { 0, 1, 2 } }
};
StaCoExTensorDm kTdmHWCN[4] = { kTdmInvalid,
{ 2, -1, 0, 1, { 0, -1, -1 } },
{ 3, 0, 1, 2, { 0, 1, -1 } },
{ 4, 1, 2, 3, { 0, 1, 2 } }
};
#undef StaCoExTensorDm
#define StaCoExFilterDm static constexpr FilterDimMap
StaCoExFilterDm kFdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExFilterDm kFdmHWIO[4] = { kFdmInvalid,
{ -1, 0, 1, 2, { 0, -1, -1 } },
{ 0, 1, 2, 3, { 0, 1, -1 } },
{ 1, 2, 3, 4, { 0, 1, 2 } }
};
StaCoExFilterDm kFdmOIHW[4] = { kFdmInvalid,
{ -1, 2, 1, 0, { 2, -1, -1 } },
{ 2, 3, 1, 0, { 2, 3, -1 } },
{ 3, 4, 1, 0, { 2, 3, 4 } }
};
#undef StaCoExFilterDm
};
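// Lookup helpers mapping (num_spatial_dims, format) to the expected
// dimension-index tables above; the tests compare these against the
// GetTensorDimIndex / GetFilterDimIndex utilities.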
inline constexpr const TensorDimMap&
GetTensorDimMap(const int num_spatial_dims, const TensorFormat format) {
return
(format == FORMAT_NHWC ||
format == FORMAT_NHWC_VECT_W) ? DimMaps::kTdmNHWC[num_spatial_dims] :
(format == FORMAT_NCHW ||
format == FORMAT_NCHW_VECT_C) ? DimMaps::kTdmNCHW[num_spatial_dims] :
(format == FORMAT_HWNC) ? DimMaps::kTdmHWNC[num_spatial_dims] :
(format == FORMAT_HWCN) ? DimMaps::kTdmHWCN[num_spatial_dims]
: DimMaps::kTdmInvalid;
}
inline constexpr const FilterDimMap&
GetFilterDimMap(const int num_spatial_dims,
const FilterTensorFormat format) {
return
(format == FORMAT_HWIO) ? DimMaps::kFdmHWIO[num_spatial_dims] :
(format == FORMAT_OIHW ||
format == FORMAT_OIHW_VECT_I) ? DimMaps::kFdmOIHW[num_spatial_dims]
: DimMaps::kFdmInvalid;
}
constexpr TensorDimMap DimMaps::kTdmInvalid;
constexpr TensorDimMap DimMaps::kTdmNHWC[4];
constexpr TensorDimMap DimMaps::kTdmNCHW[4];
constexpr TensorDimMap DimMaps::kTdmHWNC[4];
constexpr TensorDimMap DimMaps::kTdmHWCN[4];
constexpr FilterDimMap DimMaps::kFdmInvalid;
constexpr FilterDimMap DimMaps::kFdmHWIO[4];
constexpr FilterDimMap DimMaps::kFdmOIHW[4];
TEST(TensorFormatTest, FormatEnumsAndStrings) {
const string prefix = "FORMAT_";
for (auto& test_data_format : test_data_formats) {
const char* stringified_format_enum = test_data_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_data_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
TensorFormat format;
EXPECT_TRUE(FormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_data_format.first, format);
}
for (auto& test_filter_format : test_filter_formats) {
const char* stringified_format_enum = test_filter_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_filter_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
FilterTensorFormat format;
EXPECT_TRUE(FilterFormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_filter_format.first, format);
}
}
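// Checks batch, feature and spatial dimension index lookups for every data and
// filter format at a fixed number of spatial dimensions against the
// hand-written tables.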
template <int num_spatial_dims>
void RunDimensionIndexesTest() {
for (auto& test_data_format : test_data_formats) {
TensorFormat format = test_data_format.first;
auto& tdm = GetTensorDimMap(num_spatial_dims, format);
int num_dims = GetTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetTensorBatchDimIndex(num_dims, format), tdm.n());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'N'), tdm.n());
EXPECT_EQ(GetTensorFeatureDimIndex(num_dims, format), tdm.c());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'C'), tdm.c());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetTensorSpatialDimIndex(num_dims, format, i), tdm.spatial(i));
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, '0' + i),
tdm.spatial(i));
}
}
for (auto& test_filter_format : test_filter_formats) {
FilterTensorFormat format = test_filter_format.first;
auto& fdm = GetFilterDimMap(num_spatial_dims, format);
int num_dims = GetFilterTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetFilterTensorOutputChannelsDimIndex(num_dims, format), fdm.o());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'O'), fdm.o());
EXPECT_EQ(GetFilterTensorInputChannelsDimIndex(num_dims, format), fdm.i());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'I'), fdm.i());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetFilterTensorSpatialDimIndex(num_dims, format, i),
fdm.spatial(i));
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, '0' + i),
fdm.spatial(i));
}
}
}
TEST(TensorFormatTest, DimensionIndexes) {
RunDimensionIndexesTest<1>();
RunDimensionIndexesTest<2>();
RunDimensionIndexesTest<3>();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_format.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_format_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecbc942e-789a-4c27-8270-279e04a643e2 | cpp | tensorflow/tensorflow | bcast | tensorflow/core/util/bcast.cc | tensorflow/core/util/bcast_test.cc | #include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
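// Conversions between TensorShape and the flat dimension vector used by BCast;
// the broadcast computation itself is implemented by the BCastList template in
// bcast.h.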
BCast::Vec BCast::FromShape(const TensorShape& shape) {
const int N = shape.dims();
BCastList::Vec ret(N);
for (int i = 0; i < N; ++i) {
ret[i] = shape.dim_size(i);
}
return ret;
}
TensorShape BCast::ToShape(const BCastList::Vec& vec) {
TensorShape shape(vec);
return shape;
}
} | #include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
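// Test helper: runs BCast on two shapes and flattens the reshapes, bcasts,
// result/output shapes and gradient reduction indices into one string so each
// case is a single compact EXPECT_EQ.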
string BCast(const tensorflow::BCast::Vec& x, const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_x_reduce_idx(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_y_reduce_idx(), ","), "]");
return ret;
}
string BCastBatchIndices(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization,
true);
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_batch_indices(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_batch_indices(), ","), "]");
return ret;
}
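// Same idea for a three-input BCastList: stringifies reshape/bcast for each
// operand plus the shared result shape and per-operand gradient reduction
// indices.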
string BCastList3(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const tensorflow::BCast::Vec& z,
const bool fewer_dims_optimization = true) {
tensorflow::BCastList<3> b({x, y, z}, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(2), ","), "]");
return ret;
}
TEST(BCastTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCast({5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid",
BCast({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, use_optimization));
}
}
TEST(BCastListTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {3}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {2, 2}, {1}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {10, 1, 1}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, {1},
use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {1}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({1}, {5, 3, 2}, {10, 1, 1}, use_optimization));
}
}
TEST(BCastTest, Basic_SameShape) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShape) {
EXPECT_EQ(BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_Scalar_Scalar) {
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastTest, Basic_TrueScalar_Scalar) {
EXPECT_EQ(BCast({}, {}),
"[1][1][1][1]"
"[1]"
"[]"
"[][]");
EXPECT_EQ(BCast({}, {1}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1}, {}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastListTest, Basic_Scalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastListTest, Basic_TrueScalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastTest, Basic_Tensor_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,1][11,7,5,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,1][11,7,5,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1, 2147483648}, {1}),
"[2147483648][1][1][2147483648]"
"[2147483648]"
"[1,2147483648]"
"[0][0,1]");
}
TEST(BCastTest, Basic_Tensor_With_DimSize_1_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}, false),
"[11,7,5,3,2,1][1,1,1,1,1,1][1,1,1,1,1,1][11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}, false),
"[1,1,1,1,1,1][11,7,5,3,2,1][11,7,5,3,2,1][1,1,1,1,1,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}, false),
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}, false),
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
}
TEST(BCastTest, Basic_Tensor_Vector) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}),
"[1155,2][1,1][1,2][1155,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,2][11,7,5,3,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}),
"[1,2][1155,1][1155,2][1,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,2][11,7,5,3,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
}
TEST(BCastTest, Basic_Tensor_Matrix) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}),
"[385,6][1,1][1,6][385,1]"
"[385,6]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,2][11,7,5,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}),
"[1,6][385,1][385,6][1,1]"
"[385,6]"
"[11,7,5,3,2]"
"[0,1,2][]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,2][11,7,5,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_Column) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}),
"[385,3,2][1,1,1][1,3,1][385,1,2]"
"[385,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,1][11,7,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}),
"[1,3,1][385,1,2][385,3,2][1,1,1]"
"[385,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,1][11,7,5,1,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_As_Tensor) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}),
"[11,35,6][1,1,1][1,35,1][11,1,6]"
"[11,35,6]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,7,5,1,1][11,1,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}),
"[1,35,1][11,1,6][11,35,6][1,1,1]"
"[11,35,6]"
"[11,7,5,3,2]"
"[0,3,4][]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}, false),
"[1,7,5,1,1][11,1,1,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2][11,7,5,3,2]"
"[0,3,4][]");
}
TEST(BCastTest, Basic_SymbolicShape) {
constexpr int64_t kSymDim1 = -10'000'000'000;
constexpr int64_t kSymDim2 = -10'000'000'001;
const tensorflow::BCast bcast({10, kSymDim1, kSymDim2}, {10, 1, 1}, false);
EXPECT_TRUE(bcast.IsValid());
EXPECT_EQ(bcast.output_batch_size(), -1);
}
TEST(BCastTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,5,1,2][1,7,1,3,1][1,7,1,3,1][11,1,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,3][0,2,4]";
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}), truth);
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}, false), truth);
}
TEST(BCastListTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,1,1,2][1,7,5,3,1]"
"[1,7,1,3,1][11,1,5,1,2]"
"[1,1,5,1,1][11,7,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,2,3][0,2,4][0,1,3,4]";
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}), truth);
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}, false),
truth);
}
TEST(BCastTest, TestZeroDimensionShape) {
EXPECT_EQ(BCast({2, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 5}, {5}, false),
"[2,0,5][1,1,1][1,1,5][2,0,1]"
"[2,0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}, false),
"[1,1,5][2,0,1][2,0,5][1,1,1]"
"[2,0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,1,1,5][2,0,3,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}, false),
"[1,1,1,1,5][2,0,3,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}),
"[0,3,0,5][1,1,1,1][1,3,1,5][0,1,0,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}),
"[1,3,1,5][0,1,0,1][0,3,0,5][1,1,1,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,3,1,5][2,0,1,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}, false),
"[1,1,3,1,5][2,0,1,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
}
TEST(BCastTest, BatchIndices) {
EXPECT_EQ("[0,0,0,0][0,1,2,3]", BCastBatchIndices({1}, {4}));
EXPECT_EQ("[][]", BCastBatchIndices({5}, {7}));
EXPECT_EQ("[][]", BCastBatchIndices({2, 4, 6}, {2, 4, 6}));
EXPECT_EQ("[0,0,0,0,1,1,1,1,2,2,2,2][0,1,2,3,0,1,2,3,0,1,2,3]",
BCastBatchIndices({3, 1}, {1, 4}));
EXPECT_EQ("[0,0,1,1,2,2,0,0,1,1,2,2][0,1,0,1,0,1,2,3,2,3,2,3]",
BCastBatchIndices({3, 1}, {2, 1, 2}));
}
void BM_BCastSetup(::testing::benchmark::State& state) {
const int same_shape = state.range(0);
if (same_shape) {
state.SetLabel("same_shapes");
for (auto s : state) {
class BCast b({1000, 100}, {1000, 100});
}
} else {
state.SetLabel("different_shapes");
for (auto s : state) {
class BCast b({3, 1, 5}, {2, 0, 3, 0, 5});
}
}
}
BENCHMARK(BM_BCastSetup)->Arg(0)->Arg(1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c844ed3-65dc-43a5-9fc4-9d6a6269a942 | cpp | tensorflow/tensorflow | tensor_slice_writer | tensorflow/core/util/tensor_slice_writer.cc | tensorflow/core/util/tensor_slice_writer_test.cc | #include "tensorflow/core/util/tensor_slice_writer.h"
#include <memory>
#include <utility>
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
namespace {
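// Builder implementation that writes the checkpoint as an uncompressed
// tensorflow::table (LevelDB-style SSTable): entries are appended via Add()
// and Finish() closes the file and reports its final size.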
class TableBuilder : public TensorSliceWriter::Builder {
public:
TableBuilder(const string& name, WritableFile* f) : name_(name), file_(f) {
table::Options option;
option.compression = table::kNoCompression;
builder_ = std::make_unique<table::TableBuilder>(option, f);
}
void Add(StringPiece key, StringPiece val) override {
builder_->Add(key, val);
}
Status Finish(int64_t* file_size) override {
*file_size = -1;
Status s = builder_->Finish();
if (s.ok()) {
s = file_->Close();
if (s.ok()) {
*file_size = builder_->FileSize();
}
}
if (!s.ok()) {
s = errors::Internal("Error writing (tmp) checkpoint file: ", name_, ": ",
s.message());
}
builder_.reset();
file_.reset();
return s;
}
private:
string name_;
std::unique_ptr<WritableFile> file_;
std::unique_ptr<table::TableBuilder> builder_;
};
}
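// Default builder factory: opens a writable file at `name` and wraps it in the
// table-based builder above.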
Status CreateTableTensorSliceBuilder(const string& name,
TensorSliceWriter::Builder** builder) {
*builder = nullptr;
std::unique_ptr<WritableFile> f;
Status s = Env::Default()->NewWritableFile(name, &f);
if (s.ok()) {
*builder = new TableBuilder(name, f.release());
return absl::OkStatus();
} else {
return s;
}
}
TensorSliceWriter::TensorSliceWriter(const string& filename,
CreateBuilderFunction create_builder)
: filename_(filename),
create_builder_(std::move(create_builder)),
slices_(0) {
Env* env = Env::Default();
Status status = env->CanCreateTempFile(filename_, &use_temp_file_);
if (!status.ok()) {
LOG(ERROR) << "Failed to get CanCreateTempFile attribute: " << filename_;
use_temp_file_ = true;
}
data_filename_ = filename_;
if (use_temp_file_) {
data_filename_ = strings::StrCat(filename_, ".tempstate", random::New64());
}
VersionDef* versions = sts_.mutable_meta()->mutable_versions();
versions->set_producer(TF_CHECKPOINT_VERSION);
versions->set_min_consumer(TF_CHECKPOINT_VERSION_MIN_CONSUMER);
}
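// Serializes the metadata proto and all buffered slices into the builder.
// When a temporary file was used, it is renamed to the final checkpoint name
// on success and deleted on failure.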
Status TensorSliceWriter::Finish() {
Builder* b;
Status s = create_builder_(data_filename_, &b);
if (!s.ok()) {
delete b;
return s;
}
std::unique_ptr<Builder> builder(b);
string meta;
sts_.AppendToString(&meta);
builder->Add(kSavedTensorSlicesKey, meta);
for (const auto& x : data_) {
builder->Add(x.first, x.second);
}
int64_t file_size;
s = builder->Finish(&file_size);
if (use_temp_file_) {
if (s.ok()) {
s = Env::Default()->RenameFile(data_filename_, filename_);
if (s.ok()) {
VLOG(1) << "Written " << slices_ << " slices for "
<< sts_.meta().tensor_size() << " tensors (" << file_size
<< " bytes) to " << filename_;
} else {
LOG(ERROR) << "Failed to rename file " << data_filename_ << " to "
<< filename_;
}
} else {
Env::Default()->DeleteFile(data_filename_).IgnoreError();
}
}
return s;
}
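// Conservative upper bound on the serialized size of a single element of the
// given dtype, used to check that a slice stays under the proto size limit.
// The *OrZero variant returns 0 for unsupported dtypes instead of crashing.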
size_t TensorSliceWriter::MaxBytesPerElement(DataType dt) {
size_t max_bytes_per_element =
TensorSliceWriter::MaxBytesPerElementOrZero(dt);
if (max_bytes_per_element == 0) {
LOG(FATAL) << "MaxBytesPerElement not implemented for dtype: " << dt;
}
return max_bytes_per_element;
}
size_t TensorSliceWriter::MaxBytesPerElementOrZero(DataType dt) {
switch (dt) {
case DT_FLOAT:
return 4;
case DT_DOUBLE:
return 8;
case DT_INT32:
return 10;
case DT_UINT8:
return 2;
case DT_INT16:
return 10;
case DT_INT8:
return 10;
case DT_COMPLEX64:
return 8;
case DT_INT64:
return 10;
case DT_BOOL:
return 1;
case DT_QINT8:
return 10;
case DT_QUINT8:
return 2;
case DT_QINT32:
return 10;
case DT_QINT16:
return 10;
case DT_QUINT16:
return 3;
case DT_UINT16:
return 3;
case DT_COMPLEX128:
return 16;
case DT_HALF:
return 3;
case DT_INVALID:
case DT_STRING:
case DT_BFLOAT16:
default:
return 0;
}
}
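// Specialization for string tensors: the size bound must include the actual
// byte length of every string rather than a fixed per-element maximum.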
template <>
Status TensorSliceWriter::SaveData(const tstring* data, int64_t num_elements,
SavedSlice* ss) {
size_t size_bound = ss->ByteSize() + kTensorProtoHeaderBytes +
(num_elements * MaxBytesPerElement(DT_INT32));
for (int64_t i = 0; i < num_elements; ++i) {
size_bound += data[i].size();
}
if (size_bound > kMaxMessageBytes) {
return errors::InvalidArgument(
"Tensor slice is too large to serialize (conservative estimate: ",
size_bound, " bytes)");
}
Fill(data, num_elements, ss->mutable_data());
DCHECK_GE(ss->ByteSize(), 0);
DCHECK_LE(ss->ByteSize(), size_bound);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/util/tensor_slice_writer.h"
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_reader.h"
namespace tensorflow {
namespace checkpoint {
class TensorSliceWriteTestHelper {
public:
static void CheckEntries(const string& fname);
static void GetData(TensorSliceReader::Table* table, const string& name,
const TensorSlice& slice, SavedSlice* ss);
};
namespace {
void ExpectIdenticalFloatArrays(const float* expected, int size,
const float* actual) {
for (int i = 0; i < size; ++i) {
EXPECT_NEAR(expected[i], actual[i], 1e-6);
}
}
template <typename T, typename U>
void ExpectIdenticalIntArrays(const T* expected, int size, const U* actual) {
for (int i = 0; i < size; ++i) {
EXPECT_EQ(expected[i], static_cast<T>(actual[i]));
}
}
template <typename T, unsigned SIZE>
inline size_t ArraySize(const T (&v)[SIZE]) {
return SIZE;
}
TEST(TensorSliceWriteTest, SimpleWrite) {
const string filename = io::JoinPath(testing::TmpDir(), "checkpoint");
TensorSliceWriter writer(filename, CreateTableTensorSliceBuilder);
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:0,1");
const int32 data[] = {0, 1, 2, 3, 4};
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int32 data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
TensorShape shape({3, 2});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const float data[] = {1.2, 1.3, 1.4, 2.1, 2.2, 2.3};
TF_CHECK_OK(writer.Add("AA", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int64_t data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("int64", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int16 data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("int16", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
TensorSliceWriteTestHelper::CheckEntries(filename);
}
}
void TensorSliceWriteTestHelper::GetData(TensorSliceReader::Table* table,
const string& name,
const TensorSlice& slice,
SavedSlice* ss) {
string key = EncodeTensorNameSlice(name, slice);
string value;
EXPECT_TRUE(table->Get(key, &value));
SavedTensorSlices sts;
EXPECT_TRUE(ParseProtoUnlimited(&sts, value));
EXPECT_FALSE(sts.has_meta());
*ss = sts.data();
EXPECT_EQ(name, ss->name());
TensorSlice slice2(ss->slice());
EXPECT_EQ(slice.DebugString(), slice2.DebugString());
}
void TensorSliceWriteTestHelper::CheckEntries(const string& fname) {
TensorSliceReader::Table* tptr;
TF_CHECK_OK(OpenTableTensorSliceReader(fname, &tptr));
std::unique_ptr<TensorSliceReader::Table> table(tptr);
CHECK_NOTNULL(table.get());
string value;
ASSERT_TRUE(table->Get(kSavedTensorSlicesKey, &value));
{
SavedTensorSlices sts;
EXPECT_TRUE(ParseProtoUnlimited(&sts, value));
EXPECT_TRUE(sts.has_meta());
EXPECT_EQ(4, sts.meta().tensor_size());
EXPECT_LT(0, TF_CHECKPOINT_VERSION);
EXPECT_EQ(TF_CHECKPOINT_VERSION, sts.meta().versions().producer());
EXPECT_EQ(TF_CHECKPOINT_VERSION_MIN_CONSUMER,
sts.meta().versions().min_consumer());
EXPECT_FALSE(sts.has_data());
{
const SavedSliceMeta& ssm = sts.meta().tensor(0);
EXPECT_EQ("test", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT32, ssm.type());
EXPECT_EQ(2, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
TensorSlice s1(ssm.slice(1));
EXPECT_EQ("-:0,1", s0.DebugString());
EXPECT_EQ("-:3,1", s1.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(1);
EXPECT_EQ("AA", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 3 } "
"dim { size: 2 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_FLOAT, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:-", s0.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(2);
EXPECT_EQ("int64", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT64, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:3,1", s0.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(3);
EXPECT_EQ("int16", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT16, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:3,1", s0.DebugString());
}
}
{
SavedSlice ss;
GetData(table.get(), "AA", TensorSlice(2), &ss);
const float data[] = {1.2, 1.3, 1.4, 2.1, 2.2, 2.3};
EXPECT_EQ(ArraySize(data), ss.data().float_val_size());
ExpectIdenticalFloatArrays(data, ArraySize(data),
ss.data().float_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "test", TensorSlice({{0, -1}, {0, 1}}), &ss);
const int32 data[] = {0, 1, 2, 3, 4};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "test", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int32 data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "int64", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int64_t data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int64_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data),
ss.data().int64_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "int16", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int16 data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
}
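// Estimates the marginal serialized size of one element by saving 1 and then
// 1001 copies of `value` and dividing the size difference by the element count
// difference; the CheckpointSize test compares this against
// MaxBytesPerElement.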
template <typename DT>
size_t BytesPerElementHelper(DT value) {
SavedSlice ss;
std::array<DT, 1> lo_data;
std::fill(lo_data.begin(), lo_data.end(), value);
TF_EXPECT_OK(
TensorSliceWriter::SaveData(lo_data.data(), lo_data.size(), &ss));
size_t lo_byte_size = ss.ByteSizeLong();
std::array<DT, 1001> hi_data;
std::fill(hi_data.begin(), hi_data.end(), value);
TF_EXPECT_OK(
TensorSliceWriter::SaveData(hi_data.data(), hi_data.size(), &ss));
size_t hi_byte_size = ss.ByteSizeLong();
return (hi_byte_size - lo_byte_size) / (hi_data.size() - lo_data.size());
}
TEST(TensorSliceWriteTest, CheckpointSize) {
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_BOOL),
BytesPerElementHelper<bool>(false));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_BOOL),
BytesPerElementHelper<bool>(true));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_FLOAT),
BytesPerElementHelper<float>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_DOUBLE),
BytesPerElementHelper<double>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_COMPLEX64),
BytesPerElementHelper<complex64>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_COMPLEX128),
BytesPerElementHelper<complex128>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT32),
BytesPerElementHelper<int32>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT64),
BytesPerElementHelper<int64_t>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_UINT16),
BytesPerElementHelper<uint16>(std::numeric_limits<uint16>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_UINT8),
BytesPerElementHelper<uint8>(std::numeric_limits<uint8>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT8),
BytesPerElementHelper<int8>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT16),
BytesPerElementHelper<int16>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QINT8),
BytesPerElementHelper<qint8>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QUINT8),
BytesPerElementHelper<quint8>(std::numeric_limits<uint8>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QINT32),
BytesPerElementHelper<qint32>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_HALF),
BytesPerElementHelper<Eigen::half>(Eigen::half(-1.0)));
}
TEST(TensorSliceWriteTest, SizeErrors) {
const string filename = io::JoinPath(testing::TmpDir(), "checkpoint");
TensorSliceWriter writer(filename, CreateTableTensorSliceBuilder);
{
TensorShape shape({300, 1000000});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const std::vector<int8> data(300000000, -1);
Status s = writer.Add("test1", shape, slice, data.data());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Tensor slice is too large to serialize"));
}
{
TensorShape shape({256, 1024});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const std::vector<tstring> data(256 * 1024, std::string(8192, 'f'));
Status s = writer.Add("test2", shape, slice, data.data());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Tensor slice is too large to serialize"));
}
}
TEST(TensorSliceWriterTest, InvalidInput) {
SavedSlice ss;
std::array<uint32_t, 1> data;
std::fill(data.begin(), data.end(), 1234);
Status s = TensorSliceWriter::SaveData(data.data(), data.size(), &ss);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(), "Tensor slice serialization not implemented for dtype"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e44e4956-5339-4165-8837-5ff0460ea0e3 | cpp | tensorflow/tensorflow | example_proto_fast_parsing | tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.cc | tensorflow/core/util/example_proto_fast_parsing_test.cc | #include "tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h"
#include <algorithm>
#include <utility>
namespace tensorflow {
namespace example {
string ExampleName(const absl::Span<const tstring> example_names, int n) {
return example_names.empty() ? "<unknown>" : example_names[n];
}
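// Accumulates the total number of values for sparse feature d across all
// minibatch buffers and tracks the largest per-example count, using the
// recorded example_end_indices.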
void CountSparseFeatures(
const std::vector<std::vector<SparseBuffer>>& sparse_buffers, size_t d,
size_t* total_num_features, size_t* max_num_features) {
for (auto& sparse_values_tmp : sparse_buffers) {
const std::vector<size_t>& end_indices =
sparse_values_tmp[d].example_end_indices;
*total_num_features += end_indices.back();
*max_num_features = std::max(*max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
*max_num_features = std::max(*max_num_features, example_size);
}
}
}
void CopySparseBufferToTensor(DataType dtype, size_t offset, SparseBuffer* src,
Tensor* dst) {
switch (dtype) {
case DT_INT64: {
std::copy(src->int64_list.begin(), src->int64_list.end(),
dst->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy(src->float_list.begin(), src->float_list.end(),
dst->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::move(src->bytes_list.begin(), src->bytes_list.end(),
dst->flat<tstring>().data() + offset);
break;
}
default:
ReportUnexpectedDataType(dtype);
}
}
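// Returns the next wire-format tag byte without consuming it (0 if the buffer
// cannot be inspected), letting callers dispatch on the upcoming field.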
uint8 PeekTag(protobuf::io::CodedInputStream* stream) {
DCHECK(stream != nullptr);
const void* ptr;
int size;
if (!stream->GetDirectBufferPointer(&ptr, &size)) return 0;
return *static_cast<const uint8*>(ptr);
}
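// Reads a length-delimited string as a StringPiece aliasing the underlying
// buffer (no copy); fails if the stream holds fewer bytes than the declared
// length.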
bool ParseString(protobuf::io::CodedInputStream* stream, StringPiece* result) {
DCHECK(stream != nullptr);
DCHECK(result != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
if (length == 0) {
*result = StringPiece(nullptr, 0);
return true;
}
const void* stream_alias;
int stream_size;
if (!stream->GetDirectBufferPointer(&stream_alias, &stream_size)) {
return false;
}
if (static_cast<uint32>(stream_size) < length) return false;
*result = StringPiece(static_cast<const char*>(stream_alias), length);
stream->Skip(length);
return true;
}
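// Parses one map<string, Feature> entry: field 1 is the feature name, field 2
// the serialized Feature, which is kept as an aliased StringPiece and decoded
// only when the feature is actually used.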
bool ParseFeatureMapEntry(protobuf::io::CodedInputStream* stream,
parsed::FeatureMapEntry* feature_map_entry) {
DCHECK(stream != nullptr);
DCHECK(feature_map_entry != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
auto limit = stream->PushLimit(length);
if (!stream->ExpectTag(kDelimitedTag(1))) return false;
if (!ParseString(stream, &feature_map_entry->first)) return false;
if (!stream->ExpectTag(kDelimitedTag(2))) return false;
StringPiece feature_string_piece;
if (!ParseString(stream, &feature_string_piece)) return false;
feature_map_entry->second = parsed::Feature(feature_string_piece);
if (!stream->ExpectAtEnd()) return false;
stream->PopLimit(limit);
return true;
}
bool ParseFeatures(protobuf::io::CodedInputStream* stream,
parsed::Example* example) {
DCHECK(stream != nullptr);
DCHECK(example != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
auto limit = stream->PushLimit(length);
while (!stream->ExpectAtEnd()) {
parsed::FeatureMapEntry feature_map_entry;
if (!stream->ExpectTag(kDelimitedTag(1))) return false;
if (!ParseFeatureMapEntry(stream, &feature_map_entry)) return false;
example->push_back(std::move(feature_map_entry));
}
stream->PopLimit(limit);
return true;
}
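// Parses a serialized Example, consuming only the `features` field (tag 1) and
// skipping any other top-level fields via SkipExtraneousTag.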
bool ParseExample(protobuf::io::CodedInputStream* stream,
parsed::Example* example) {
DCHECK(stream != nullptr);
DCHECK(example != nullptr);
while (!stream->ExpectAtEnd()) {
if (!stream->ExpectTag(kDelimitedTag(1))) {
if (!SkipExtraneousTag(stream)) return false;
} else {
if (!ParseFeatures(stream, example)) return false;
}
}
return true;
}
bool ParseExample(StringPiece serialized, parsed::Example* example) {
DCHECK(example != nullptr);
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized.data()), serialized.size());
EnableAliasing(&stream);
return ParseExample(&stream, example);
}
template <>
void CopyOrMoveBlock(const tstring* b, const tstring* e, tstring* t) {
std::move(b, e, t);
}
template <>
const SmallVector<int64_t>& GetListFromBuffer<int64_t>(
const SparseBuffer& buffer) {
return buffer.int64_list;
}
template <>
const SmallVector<float>& GetListFromBuffer<float>(const SparseBuffer& buffer) {
return buffer.float_list;
}
template <>
const SmallVector<tstring>& GetListFromBuffer<tstring>(
const SparseBuffer& buffer) {
return buffer.bytes_list;
}
}
} | #include "tensorflow/core/util/example_proto_fast_parsing.h"
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/example_proto_fast_parsing_test.pb.h"
namespace tensorflow {
namespace example {
namespace {
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
string SerializedToReadable(string serialized) {
string result;
result += '"';
for (char c : serialized)
result += strings::StrCat("\\x", strings::Hex(c, strings::kZeroPad2));
result += '"';
return result;
}
template <class T>
string Serialize(const T& example) {
string serialized;
example.SerializeToString(&serialized);
return serialized;
}
void TestCorrectness(const string& serialized) {
Example example;
Example fast_example;
EXPECT_TRUE(example.ParseFromString(serialized));
example.DiscardUnknownFields();
EXPECT_TRUE(TestFastParse(serialized, &fast_example));
EXPECT_EQ(example.DebugString(), fast_example.DebugString());
if (example.DebugString() != fast_example.DebugString()) {
LOG(ERROR) << "Bad serialized: " << SerializedToReadable(serialized);
}
}
TEST(FastParse, IgnoresPrecedingUnknownTopLevelFields) {
ExampleWithExtras example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
example.set_extra1("some_str");
example.set_extra2(123);
example.set_extra3(234);
example.set_extra4(345);
example.set_extra5(4.56);
example.add_extra6(5.67);
example.add_extra6(6.78);
(*example.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, IgnoresTrailingUnknownTopLevelFields) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
ExampleWithExtras context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
context.set_extra1("some_str");
context.set_extra2(123);
context.set_extra3(234);
context.set_extra4(345);
context.set_extra5(4.56);
context.add_extra6(5.67);
context.add_extra6(6.78);
(*context.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, SingleInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, DenseInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(0);
Example context;
(*context.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(15);
string serialized = Serialize(example) + Serialize(context);
{
Example deserialized;
EXPECT_TRUE(deserialized.ParseFromString(serialized));
EXPECT_EQ(deserialized.DebugString(), context.DebugString());
}
TestCorrectness(serialized);
}
TEST(FastParse, NonPacked) {
TestCorrectness(
"\x0a\x0e\x0a\x0c\x0a\x03\x61\x67\x65\x12\x05\x1a\x03\x0a\x01\x0d");
}
TEST(FastParse, Packed) {
TestCorrectness(
"\x0a\x0d\x0a\x0b\x0a\x03\x61\x67\x65\x12\x04\x1a\x02\x08\x0d");
}
TEST(FastParse, ValueBeforeKeyInMap) {
TestCorrectness("\x0a\x12\x0a\x10\x12\x09\x0a\x07\x0a\x05value\x0a\x03key");
}
TEST(FastParse, EmptyFeatures) {
Example example;
example.mutable_features();
TestCorrectness(Serialize(example));
}
void TestCorrectnessJson(const string& json) {
auto resolver = protobuf::util::NewTypeResolverForDescriptorPool(
"type.googleapis.com", protobuf::DescriptorPool::generated_pool());
string serialized;
auto s = protobuf::util::JsonToBinaryString(
resolver, "type.googleapis.com/tensorflow.Example", json, &serialized);
EXPECT_TRUE(s.ok()) << s;
delete resolver;
TestCorrectness(serialized);
}
TEST(FastParse, JsonUnivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8='] }}}"
"}}");
}
TEST(FastParse, JsonMultivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0, 13, 23]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1, 1.2, 1.3]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8=', 'WW8K'] }}}"
"}}");
}
TEST(FastParse, SingleInt64) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
TestCorrectness(Serialize(example));
}
static string ExampleWithSomeFeatures() {
Example example;
(*example.mutable_features()->mutable_feature())[""];
(*example.mutable_features()->mutable_feature())["empty_bytes_list"]
.mutable_bytes_list();
(*example.mutable_features()->mutable_feature())["empty_float_list"]
.mutable_float_list();
(*example.mutable_features()->mutable_feature())["empty_int64_list"]
.mutable_int64_list();
BytesList* bytes_list =
(*example.mutable_features()->mutable_feature())["bytes_list"]
.mutable_bytes_list();
bytes_list->add_value("bytes1");
bytes_list->add_value("bytes2");
FloatList* float_list =
(*example.mutable_features()->mutable_feature())["float_list"]
.mutable_float_list();
float_list->add_value(1.0);
float_list->add_value(2.0);
Int64List* int64_list =
(*example.mutable_features()->mutable_feature())["int64_list"]
.mutable_int64_list();
int64_list->add_value(3);
int64_list->add_value(270);
int64_list->add_value(86942);
return Serialize(example);
}
TEST(FastParse, SomeFeatures) { TestCorrectness(ExampleWithSomeFeatures()); }
static void AddDenseFeature(const char* feature_name, DataType dtype,
PartialTensorShape shape, bool variable_length,
size_t elements_per_stride,
FastParseExampleConfig* out_config) {
out_config->dense.emplace_back();
auto& new_feature = out_config->dense.back();
new_feature.feature_name = feature_name;
new_feature.dtype = dtype;
new_feature.shape = std::move(shape);
new_feature.default_value = Tensor(dtype, {});
new_feature.variable_length = variable_length;
new_feature.elements_per_stride = elements_per_stride;
}
static void AddSparseFeature(const char* feature_name, DataType dtype,
FastParseExampleConfig* out_config) {
out_config->sparse.emplace_back();
auto& new_feature = out_config->sparse.back();
new_feature.feature_name = feature_name;
new_feature.dtype = dtype;
}
TEST(FastParse, StatsCollection) {
const size_t kNumExamples = 13;
std::vector<tstring> serialized(kNumExamples, ExampleWithSomeFeatures());
FastParseExampleConfig config_dense;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_dense);
AddDenseFeature("float_list", DT_FLOAT, {2}, false, 2, &config_dense);
AddDenseFeature("int64_list", DT_INT64, {3}, false, 3, &config_dense);
config_dense.collect_feature_stats = true;
FastParseExampleConfig config_varlen;
AddDenseFeature("bytes_list", DT_STRING, {-1}, true, 1, &config_varlen);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_varlen);
AddDenseFeature("int64_list", DT_INT64, {-1}, true, 1, &config_varlen);
config_varlen.collect_feature_stats = true;
FastParseExampleConfig config_sparse;
AddSparseFeature("bytes_list", DT_STRING, &config_sparse);
AddSparseFeature("float_list", DT_FLOAT, &config_sparse);
AddSparseFeature("int64_list", DT_INT64, &config_sparse);
config_sparse.collect_feature_stats = true;
FastParseExampleConfig config_mixed;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_mixed);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_mixed);
AddSparseFeature("int64_list", DT_INT64, &config_mixed);
config_mixed.collect_feature_stats = true;
for (const FastParseExampleConfig& config :
{config_dense, config_varlen, config_sparse, config_mixed}) {
{
Result result;
TF_CHECK_OK(FastParseExample(config, serialized, {}, nullptr, &result));
EXPECT_EQ(kNumExamples, result.feature_stats.size());
for (const PerExampleFeatureStats& stats : result.feature_stats) {
EXPECT_EQ(7, stats.features_count);
EXPECT_EQ(7, stats.feature_values_count);
}
}
{
Result result;
TF_CHECK_OK(FastParseSingleExample(config, serialized[0], &result));
EXPECT_EQ(1, result.feature_stats.size());
EXPECT_EQ(7, result.feature_stats[0].features_count);
EXPECT_EQ(7, result.feature_stats[0].feature_values_count);
}
}
}
string RandStr(random::SimplePhilox* rng) {
static const char key_char_lookup[] =
"0123456789{}~`!@#$%^&*()"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
auto len = 1 + rng->Rand32() % 200;
string str;
str.reserve(len);
while (len-- > 0) {
str.push_back(
key_char_lookup[rng->Rand32() % (sizeof(key_char_lookup) /
sizeof(key_char_lookup[0]))]);
}
return str;
}
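// One fuzz iteration: builds a random key set, serializes several Examples
// with random bytes/float/int64 features, concatenates the serializations, and
// checks that fast parsing agrees with the regular proto parser.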
void Fuzz(random::SimplePhilox* rng) {
auto num_keys = 1 + rng->Rand32() % 100;
std::unordered_set<string> unique_keys;
for (auto i = 0; i < num_keys; ++i) {
unique_keys.emplace(RandStr(rng));
}
Example example;
string serialized_example;
auto num_concats = 1 + rng->Rand32() % 4;
std::vector<Feature::KindCase> feat_types(
{Feature::kBytesList, Feature::kFloatList, Feature::kInt64List});
std::vector<string> all_keys(unique_keys.begin(), unique_keys.end());
while (num_concats--) {
example.Clear();
auto num_active_keys = 1 + rng->Rand32() % all_keys.size();
for (auto i = 0; i < num_active_keys; ++i) {
auto fkey = all_keys[rng->Rand32() % all_keys.size()];
auto ftype_idx = rng->Rand32() % feat_types.size();
auto num_features = 1 + rng->Rand32() % 5;
switch (static_cast<Feature::KindCase>(feat_types[ftype_idx])) {
case Feature::kBytesList: {
BytesList* bytes_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_bytes_list();
while (num_features--) {
bytes_list->add_value(RandStr(rng));
}
break;
}
case Feature::kFloatList: {
FloatList* float_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_float_list();
while (num_features--) {
float_list->add_value(rng->RandFloat());
}
break;
}
case Feature::kInt64List: {
Int64List* int64_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_int64_list();
while (num_features--) {
int64_list->add_value(rng->Rand64());
}
break;
}
default: {
LOG(QFATAL);
break;
}
}
}
serialized_example += example.SerializeAsString();
}
TestCorrectness(serialized_example);
}
TEST(FastParse, FuzzTest) {
const uint64 seed = 1337;
random::PhiloxRandom philox(seed);
random::SimplePhilox rng(&philox);
auto num_runs = 200;
while (num_runs--) {
LOG(INFO) << "runs left: " << num_runs;
Fuzz(&rng);
}
}
TEST(TestFastParseExample, Empty) {
Result result;
FastParseExampleConfig config;
config.sparse.push_back({"test", DT_STRING});
Status status =
FastParseExample(config, absl::Span<const tstring>(),
absl::Span<const tstring>(), nullptr, &result);
EXPECT_TRUE(status.ok()) << status;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_fast_parsing_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3ddb9f4-1b05-4c52-952e-978d27be77a3 | cpp | tensorflow/tensorflow | debug_data_dumper | tensorflow/core/util/debug_data_dumper.cc | tensorflow/core/util/debug_data_dumper_test.cc | #include "tensorflow/core/util/debug_data_dumper.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
DebugDataDumper* DebugDataDumper::Global() {
static DebugDataDumper* global_instance_ = new DebugDataDumper();
return global_instance_;
}
DebugDataDumper::DebugDataDumper() { LoadEnvvars(); }
void DebugDataDumper::LoadEnvvars() {
const char* dump_wrapped = getenv("TF_DUMP_GRAPH_WRAPPED");
dump_wrapped_ = static_cast<bool>(dump_wrapped);
const char* name_filter = getenv("TF_DUMP_GRAPH_NAME_FILTER");
name_filter_ =
name_filter ? std::optional<std::string>{name_filter} : std::nullopt;
const char* groups_filter = getenv("TF_DUMP_GRAPH_GROUPS");
groups_filter_ =
groups_filter ? std::set<std::string>(absl::StrSplit(groups_filter, ','))
: std::set<std::string>({kDebugGroupMain});
}
bool DebugDataDumper::ShouldDump(const std::string& name,
const std::string& group) const {
if (!dump_wrapped_ && absl::StartsWith(name, "__wrapped__")) return false;
if (name_filter_ == std::nullopt) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not set";
return false;
}
if (!absl::EqualsIgnoreCase(*name_filter_, "*") &&
!absl::StrContains(name, *name_filter_)) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not '*' and "
<< "it is not contained by the graph name";
return false;
}
if (groups_filter_.find(group) == groups_filter_.end() &&
groups_filter_.find("*") == groups_filter_.end())
return false;
return true;
}
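// Illustrative usage sketch (not part of the original file): the checks above
// are driven entirely by environment variables, so a caller typically enables
// dumping the way the unit tests do. The concrete values below are assumptions
// chosen only for illustration:
//
//   setenv("TF_DUMP_GRAPH_PREFIX", "/tmp/tf_dumps", 1);
//   setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
//   setenv("TF_DUMP_GRAPH_GROUPS", "main", 1);
//   DEBUG_DATA_DUMPER()->LoadEnvvars();
//   bool dump = DEBUG_DATA_DUMPER()->ShouldDump("my_graph", kDebugGroupMain);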
void DebugDataDumper::DumpOpCreationStackTraces(const std::string& name,
const std::string& group,
const std::string& tag,
const Graph* graph) {
if (!ShouldDump(name, group)) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
DumpToFile(dump_filename, "", ".csv", "StackTrace",
[graph, &dump_filename](WritableFile* file) {
auto status = file->Append("node_id,node_name,stackframes\n");
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
for (Node* node : graph->nodes()) {
auto stack_trace = node->GetStackTrace();
if (stack_trace == nullptr) continue;
int node_id = node->id();
const std::string& node_name = node->name();
std::vector<std::string> stackframes;
stackframes.reserve(stack_trace->ToFrames().size());
for (auto& frame : stack_trace->ToFrames()) {
stackframes.push_back(
absl::StrFormat("%s(%d): %s", frame.file_name,
frame.line_number, frame.function_name));
}
status = file->Append(
absl::StrFormat("%d,%s,%s\n", node_id, node_name,
absl::StrJoin(stackframes, ";")));
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
}
return file->Close();
});
}
void DebugDataDumper::DumpGraph(const std::string& name,
const std::string& group,
const std::string& tag, const Graph* graph,
const FunctionLibraryDefinition* func_lib_def,
bool bypass_filter) {
if (!ShouldDump(name, group) && !bypass_filter) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
if (dump_filename.size() > 255) {
LOG(WARNING) << "Failed to dump graph " << dump_filename << " to "
<< ", because the file name is longer than 255";
return;
}
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
if (func_lib_def) {
FunctionLibraryDefinition reachable_lib_def =
func_lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
}
DumpGraphDefToFile(dump_filename, graph_def);
}
std::string DebugDataDumper::GetDumpFilename(const std::string& name,
const std::string& group,
const std::string& tag) {
std::string dump_name = name.empty() ? "unknown_graph" : name;
return absl::StrFormat("%s.%04d.%s.%s", dump_name, GetNextDumpId(name), group,
tag);
}
} | #include "tensorflow/core/util/debug_data_dumper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(DebugDataDumper, NoPrefixTest) {
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, NoNameFilterTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, ShouldDumpTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DoNotDumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
setenv("TF_DUMP_GRAPH_GROUPS", "main,bridge_phase1_clustering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
setenv("TF_DUMP_GRAPH_WRAPPED", "true", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
}
TEST(DebugDataDumper, DumpFileBasenameTest) {
EXPECT_EQ("DumpFileBasenameTest1.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag1"));
EXPECT_EQ("DumpFileBasenameTest1.0001.main.tag2",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag2"));
EXPECT_EQ("DumpFileBasenameTest2.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest2",
kDebugGroupMain, "tag1"));
}
TEST(DebugDataDumper, DumpGraphToFileTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpGraph("DumpGraphToFileTest", kDebugGroupMain, "tag",
&graph, nullptr, false);
std::string dumpFilename =
io::JoinPath(dir, "DumpGraphToFileTest.0000.main.tag.pbtxt");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, DumpGraphLongFileNameCrashTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::string name = std::string(256, 'x');
DEBUG_DATA_DUMPER()->DumpGraph(name, kDebugGroupMain, "tag", &graph, nullptr,
false);
std::string dumpFilename = io::JoinPath(
dir, absl::StrFormat("%s.0000.main.tag.pbtxt", name.c_str()));
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
TEST(DebugDataDumper, DumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_OP_CREATION_STACKTRACES", "1", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.csv");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, NoDumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.json");
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_data_dumper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_data_dumper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3718fbeb-f057-4522-8c2c-1f4e4ab2e411 | cpp | tensorflow/tensorflow | work_sharder | tensorflow/core/util/work_sharder.cc | tensorflow/core/util/work_sharder_test.cc | #include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <functional>
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace {
bool UseEigenParallelFor() {
static bool result = []() {
bool result = true;
if (auto status =
tsl::ReadBoolFromEnvVar("TF_USE_EIGEN_PARALLEL_FOR_IN_WORK_SHARDER",
true, &result);
status.ok()) {
return result;
}
return true;
}();
return result;
}
}
thread_local int per_thread_max_parallelism = 1000000;
void SetPerThreadMaxParallelism(int max_parallelism) {
CHECK_LE(0, max_parallelism);
per_thread_max_parallelism = max_parallelism;
}
int GetPerThreadMaxParallelism() { return per_thread_max_parallelism; }
void Shard(int max_parallelism, thread::ThreadPool* workers, int64_t total,
int64_t cost_per_unit, std::function<void(int64_t, int64_t)> work) {
CHECK_GE(total, 0);
if (total == 0) {
return;
}
max_parallelism = std::min(max_parallelism, GetPerThreadMaxParallelism());
if (max_parallelism <= 1) {
work(0, total);
return;
}
if (UseEigenParallelFor() && max_parallelism >= workers->NumThreads()) {
tsl::profiler::TraceMe trace_me([=, num_threads = workers->NumThreads()]() {
return tsl::profiler::TraceMeEncode("ParallelFor",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism},
{"num_threads", num_threads}});
});
workers->ParallelFor(total, cost_per_unit, work);
return;
}
Sharder::Do(
total, cost_per_unit, work,
[&workers](Sharder::Closure c) { workers->Schedule(c); },
max_parallelism);
}
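// Illustrative usage sketch (not part of the original file): a typical caller
// hands Shard() a contiguous index range and a closure over [start, limit).
// The pool size, total, and cost estimate below are assumptions chosen only
// for illustration:
//
//   thread::ThreadPool pool(Env::Default(), "example_pool", 8);
//   std::vector<float> out(4096);
//   Shard(8, &pool, out.size(), 100 /* cost_per_unit */,
//         [&out](int64_t start, int64_t limit) {
//           for (int64_t i = start; i < limit; ++i) out[i] = 2.0f * i;
//         });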
void Sharder::Do(int64_t total, int64_t cost_per_unit, const Work& work,
const Runner& runner, int max_parallelism) {
tsl::profiler::TraceMe trace_me([=]() {
return tsl::profiler::TraceMeEncode("Sharder::Do",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism}});
});
cost_per_unit = std::max(int64_t{1}, cost_per_unit);
static const int64_t kMinCostPerShard = 10000;
const int num_shards =
std::max<int>(1, std::min(static_cast<int64_t>(max_parallelism),
total * cost_per_unit / kMinCostPerShard));
const int64_t block_size = (total + num_shards - 1) / num_shards;
CHECK_GT(block_size, 0);
if (block_size >= total) {
work(0, total);
return;
}
const int num_shards_used = (total + block_size - 1) / block_size;
BlockingCounter counter(num_shards_used - 1);
for (int64_t start = block_size; start < total; start += block_size) {
auto limit = std::min(start + block_size, total);
runner([&work, &counter, start, limit]() {
work(start, limit);
counter.DecrementCount();
});
}
work(0, std::min(block_size, total));
counter.Wait();
}
} | #include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <atomic>
#include <functional>
#include <vector>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
void RunSharding(int64_t num_workers, int64_t total, int64_t cost_per_unit,
int64_t per_thread_max_parallelism,
thread::ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<bool> work(total, false);
Shard(num_workers, threads, total, cost_per_unit,
[=, &mu, &num_shards, &num_done_work, &work](int64_t start,
int64_t limit) {
VLOG(1) << "Shard [" << start << "," << limit << ")";
EXPECT_GE(start, 0);
EXPECT_LE(limit, total);
mutex_lock l(mu);
++num_shards;
for (; start < limit; ++start) {
EXPECT_FALSE(work[start]);
++num_done_work;
work[start] = true;
}
});
LOG(INFO) << num_workers << " " << total << " " << cost_per_unit << " "
<< num_shards;
EXPECT_EQ(num_done_work, total);
if (std::min(num_workers, per_thread_max_parallelism) <
threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + per_thread_max_parallelism);
}
}
TEST(Shard, Basic) {
thread::ThreadPool threads(Env::Default(), "test", 16);
for (auto workers : {0, 1, 2, 3, 5, 7, 10, 11, 15, 100, 1000}) {
for (auto total : {0, 1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto cost_per_unit : {0, 1, 11, 102, 1003, 10005, 1000007}) {
for (auto maxp : {1, 2, 4, 8, 100}) {
ScopedPerThreadMaxParallelism s(maxp);
RunSharding(workers, total, cost_per_unit, maxp, &threads);
}
}
}
}
}
TEST(Shard, OverflowTest) {
thread::ThreadPool threads(Env::Default(), "test", 3);
for (auto workers : {1, 2, 3}) {
const int64_t total_elements = 1LL << 32;
const int64_t cost_per_unit = 10;
std::atomic<int64_t> num_elements(0);
Shard(workers, &threads, total_elements, cost_per_unit,
[&num_elements](int64_t start, int64_t limit) {
num_elements += limit - start;
});
EXPECT_EQ(num_elements.load(), total_elements);
}
}
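// Illustrative sketch (added example, not in the original test suite): a
// minimal end-to-end use of Shard() that sums an index range in parallel.
// The worker count, total, and cost estimate below are arbitrary assumptions.
TEST(Shard, ExampleUsageSketch) {
  thread::ThreadPool threads(Env::Default(), "example", 4);
  std::atomic<int64_t> sum(0);
  Shard(4, &threads, 1000, 10,
        [&sum](int64_t start, int64_t limit) {
          int64_t local = 0;
          for (int64_t i = start; i < limit; ++i) local += i;
          sum += local;
        });
  EXPECT_EQ(sum.load(), 1000 * 999 / 2);
}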
void BM_Sharding(::testing::benchmark::State& state) {
const int arg = state.range(0);
thread::ThreadPool threads(Env::Default(), "test", 16);
const int64_t total = 1LL << 30;
auto lambda = [](int64_t start, int64_t limit) {};
auto work = std::cref(lambda);
for (auto s : state) {
Shard(arg - 1, &threads, total, 1, work);
}
}
BENCHMARK(BM_Sharding)->Range(1, 128);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/work_sharder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/work_sharder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
823b8659-b857-41eb-84fe-0909c8614260 | cpp | tensorflow/tensorflow | tensor_slice_reader | tensorflow/core/util/tensor_slice_reader.cc | tensorflow/core/util/tensor_slice_reader_test.cc | #include "tensorflow/core/util/tensor_slice_reader.h"
#include <climits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/iterator.h"
#include "tensorflow/core/lib/io/table.h"
#include "tensorflow/core/lib/io/table_options.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
TensorSliceReader::Table::~Table() = default;
namespace {
class TensorSliceReaderTable : public TensorSliceReader::Table {
public:
explicit TensorSliceReaderTable(RandomAccessFile* f, table::Table* t)
: file_(f), table_(t) {}
~TensorSliceReaderTable() override {
delete table_;
delete file_;
}
bool Get(const string& key, string* value) override {
std::unique_ptr<table::Iterator> iter(table_->NewIterator());
iter->Seek(key);
if (iter->Valid() && iter->key() == key) {
StringPiece v = iter->value();
value->assign(v.data(), v.size());
return true;
} else {
return false;
}
}
private:
RandomAccessFile* file_;
table::Table* table_;
};
}
Status OpenTableTensorSliceReader(const string& fname,
TensorSliceReader::Table** result) {
*result = nullptr;
Env* env = Env::Default();
std::unique_ptr<RandomAccessFile> f;
Status s = env->NewRandomAccessFile(fname, &f);
if (s.ok()) {
uint64 file_size;
s = env->GetFileSize(fname, &file_size);
if (s.ok()) {
table::Options options;
table::Table* table;
s = table::Table::Open(options, f.get(), file_size, &table);
if (s.ok()) {
*result = new TensorSliceReaderTable(f.release(), table);
return absl::OkStatus();
} else {
s = errors::CreateWithUpdatedMessage(
s, strings::StrCat(s.message(),
": perhaps your file is in a different "
"file format and you need to use a "
"different restore operator?"));
}
}
}
LOG(WARNING) << "Could not open " << fname << ": " << s;
return s;
}
TensorSliceReader::TensorSliceReader(const string& filepattern)
: TensorSliceReader(filepattern, OpenTableTensorSliceReader,
kLoadAllShards) {}
TensorSliceReader::TensorSliceReader(const string& filepattern,
OpenTableFunction open_function)
: TensorSliceReader(filepattern, std::move(open_function), kLoadAllShards) {
}
TensorSliceReader::TensorSliceReader(const string& filepattern,
OpenTableFunction open_function,
int preferred_shard)
: filepattern_(filepattern), open_function_(std::move(open_function)) {
VLOG(1) << "TensorSliceReader for " << filepattern;
Status s = Env::Default()->GetMatchingPaths(filepattern, &fnames_);
if (!s.ok()) {
status_ = errors::InvalidArgument(
"Unsuccessful TensorSliceReader constructor: "
"Failed to get matching files on ",
filepattern, ": ", s.ToString());
return;
}
if (fnames_.empty()) {
status_ = errors::NotFound(
"Unsuccessful TensorSliceReader constructor: "
"Failed to find any matching files for ",
filepattern);
return;
}
sss_.resize(fnames_.size());
for (size_t shard = 0; shard < fnames_.size(); ++shard) {
fname_to_index_.insert(std::make_pair(fnames_[shard], shard));
}
if (preferred_shard == kLoadAllShards || fnames_.size() == 1 ||
static_cast<size_t>(preferred_shard) >= fnames_.size()) {
LoadAllShards();
} else {
VLOG(1) << "Loading shard " << preferred_shard << " for " << filepattern_;
LoadShard(preferred_shard);
}
}
void TensorSliceReader::LoadShard(int shard) const {
CHECK_LT(shard, sss_.size());
if (sss_[shard] || !status_.ok()) {
return;
}
string value;
SavedTensorSlices sts;
const string fname = fnames_[shard];
VLOG(1) << "Reading meta data from file " << fname << "...";
Table* table;
Status s = open_function_(fname, &table);
if (!s.ok()) {
status_ = errors::DataLoss("Unable to open table file ", fname, ": ",
s.ToString());
return;
}
sss_[shard].reset(table);
if (!(table->Get(kSavedTensorSlicesKey, &value) &&
ParseProtoUnlimited(&sts, value))) {
status_ = errors::Internal(
"Failed to find the saved tensor slices at the beginning of the "
"checkpoint file: ",
fname);
return;
}
status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION,
TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint",
"checkpoint");
if (!status_.ok()) return;
for (const SavedSliceMeta& ssm : sts.meta().tensor()) {
TensorShape ssm_shape;
status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape);
if (!status_.ok()) return;
for (const TensorSliceProto& tsp : ssm.slice()) {
TensorSlice ss_slice;
status_ = TensorSlice::BuildTensorSlice(tsp, &ss_slice);
if (!status_.ok()) return;
status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname,
ss_slice, &tensors_);
if (!status_.ok()) return;
}
}
}
void TensorSliceReader::LoadAllShards() const {
VLOG(1) << "Loading all shards for " << filepattern_;
for (size_t i = 0; i < fnames_.size() && status_.ok(); ++i) {
LoadShard(i);
}
all_shards_loaded_ = true;
}
const TensorSliceSet* TensorSliceReader::FindTensorSlice(
const string& name, const TensorSlice& slice,
std::vector<std::pair<TensorSlice, string>>* details) const {
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (tss && !tss->QueryMeta(slice, details)) {
return nullptr;
}
return tss;
}
TensorSliceReader::~TensorSliceReader() {
for (auto& temp : tensors_) {
delete temp.second;
}
tensors_.clear();
}
bool TensorSliceReader::HasTensor(const string& name, TensorShape* shape,
DataType* type) const {
mutex_lock l(mu_);
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (!tss && !all_shards_loaded_) {
VLOG(1) << "Did not find tensor in preferred shard, loading all shards: "
<< name;
LoadAllShards();
tss = gtl::FindPtrOrNull(tensors_, name);
}
if (tss) {
if (shape) {
*shape = tss->shape();
}
if (type) {
*type = tss->type();
}
return true;
} else {
return false;
}
}
Status TensorSliceReader::GetTensor(
const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const {
DataType type;
TensorShape shape;
TensorSlice slice;
{
mutex_lock l(mu_);
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (tss == nullptr) {
return errors::NotFound(name, " not found in checkpoint file");
}
if (tss->Slices().size() > 1) {
return errors::Unimplemented("Sliced checkpoints are not supported");
}
type = tss->type();
shape = tss->shape();
slice = tss->Slices().begin()->second.slice;
}
std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor);
Status s = tensorflow::Tensor::BuildTensor(type, shape, t.get());
if (!s.ok()) return s;
for (const auto d : shape.dim_sizes()) {
if (d == LLONG_MAX) {
return errors::InvalidArgument("Unable to read dimensions of size ",
LLONG_MAX,
". Got shape: ", shape.DebugString());
}
}
bool success = false;
#define READER_COPY(dt) \
case dt: \
success = CopySliceData(name, slice, \
t->flat<EnumToDataType<dt>::Type>().data()); \
break;
switch (type) {
READER_COPY(DT_FLOAT);
READER_COPY(DT_DOUBLE);
READER_COPY(DT_INT32);
READER_COPY(DT_UINT8);
READER_COPY(DT_INT16);
READER_COPY(DT_INT8);
READER_COPY(DT_INT64);
READER_COPY(DT_STRING);
READER_COPY(DT_BOOL);
default:
return errors::Unimplemented("Data type not supported");
}
#undef READER_COPY
if (!success) {
return errors::NotFound(name, " not found in checkpoint file");
}
std::swap(*out_tensor, t);
return absl::OkStatus();
}
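// Illustrative usage sketch (not part of the original file): the common read
// path opens a sharded checkpoint by pattern and materializes one tensor. The
// file pattern and variable name are assumptions chosen only for illustration:
//
//   TensorSliceReader reader("/tmp/ckpt/my_model_*", OpenTableTensorSliceReader);
//   TF_RETURN_IF_ERROR(reader.status());
//   std::unique_ptr<Tensor> t;
//   TF_RETURN_IF_ERROR(reader.GetTensor("my_variable", &t));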
TensorSliceReader::VarToShapeMap TensorSliceReader::GetVariableToShapeMap()
const {
VarToShapeMap name_to_shape;
if (status().ok()) {
for (auto& e : Tensors()) {
name_to_shape[e.first] = e.second->shape();
}
}
return name_to_shape;
}
TensorSliceReader::VarToDataTypeMap
TensorSliceReader::GetVariableToDataTypeMap() const {
VarToDataTypeMap name_to_dtype;
if (status().ok()) {
for (auto& e : Tensors()) {
name_to_dtype[e.first] = e.second->type();
}
}
return name_to_dtype;
}
const string TensorSliceReader::DebugString() const {
string shape_str;
if (status().ok()) {
for (const auto& e : Tensors()) {
strings::StrAppend(&shape_str, e.first, " (",
DataType_Name(e.second->type()), ") ",
e.second->shape().DebugString());
const int num_slices = e.second->Slices().size();
if (num_slices > 1) {
strings::StrAppend(&shape_str, ", ", num_slices, " slices");
}
strings::StrAppend(&shape_str, "\n");
}
}
return shape_str;
}
}
} | #include "tensorflow/core/util/tensor_slice_reader.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/io/iterator.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/table.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice.pb.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "tensorflow/core/util/tensor_slice_writer.h"
namespace tensorflow {
namespace checkpoint {
namespace {
void SimpleFloatHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
TensorSliceReader::OpenTableFunction open_function) {
const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint");
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const float data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const float data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, std::move(open_function));
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DT_FLOAT, type);
EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr));
}
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
float expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
float results[10];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
float expected[] = {5, 6, 7, 8, 9};
float results[5];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
float results[6];
EXPECT_FALSE(reader.CopySliceData("test", s, results));
}
}
TEST(TensorSliceReaderTest, SimpleFloat) {
SimpleFloatHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader);
}
template <typename T, typename U>
void SimpleIntXHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
TensorSliceReader::OpenTableFunction open_function,
const string& checkpoint_file) {
const string fname_base = io::JoinPath(testing::TmpDir(), checkpoint_file);
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const T data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const T data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const T data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, std::move(open_function));
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DataTypeToEnum<T>::v(), type);
EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr));
}
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
T expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
U results[10];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
T expected[] = {5, 6, 7, 8, 9};
U results[5];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
U results[6];
EXPECT_FALSE(reader.CopySliceData("test", s, results));
}
}
#define TEST_SIMPLE_INT(TYPE, SAVED_TYPE) \
TEST(TensorSliceReaderTest, Simple##TYPE) { \
SimpleIntXHelper<TYPE, SAVED_TYPE>(CreateTableTensorSliceBuilder, \
OpenTableTensorSliceReader, \
#TYPE "_checkpoint"); \
}
TEST_SIMPLE_INT(int32, int32)
TEST_SIMPLE_INT(int64_t, int64_t)
TEST_SIMPLE_INT(int16, int32)
TEST_SIMPLE_INT(int8, int32)
TEST_SIMPLE_INT(uint8, int32)
void MutateSavedTensorSlices(
const std::string& fname,
const std::function<std::string(SavedTensorSlices)>& mutator) {
table::Options options;
options.compression = table::kNoCompression;
std::vector<std::pair<std::string, std::string>> entries;
{
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(Env::Default()->NewRandomAccessFile(fname, &file));
uint64 file_size;
TF_CHECK_OK(Env::Default()->GetFileSize(fname, &file_size));
table::Table* t;
TF_CHECK_OK(table::Table::Open(options, file.get(), file_size, &t));
std::unique_ptr<table::Table> table(t);
std::unique_ptr<table::Iterator> it(table->NewIterator());
for (it->Seek(""); it->Valid(); it->Next()) {
entries.emplace_back(it->key(), it->value());
}
TF_CHECK_OK(it->status());
}
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(Env::Default()->NewWritableFile(fname, &file));
table::TableBuilder builder(options, file.get());
for (const auto& entry : entries) {
SavedTensorSlices sts;
CHECK(sts.ParseFromString(entry.second));
builder.Add(entry.first, mutator(std::move(sts)));
}
TF_CHECK_OK(builder.Finish());
TF_CHECK_OK(file->Close());
}
}
TEST(TensorSliceReaderTest, MissingTensorType) {
const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorShape shape({4, 5});
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.clear_type();
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_CHECK_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
TEST(TensorSliceReaderTest, UnsupportedTensorType) {
const string fname = io::JoinPath(testing::TmpDir(), "int32_ref_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorShape shape({4, 5});
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.set_type(DT_INT32_REF);
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_CHECK_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
TEST(TensorSliceReaderTest, NegativeTensorShapeDimension) {
const string fname =
io::JoinPath(testing::TmpDir(), "negative_dim_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
for (auto& dim : *tensor.mutable_shape()->mutable_dim()) {
dim.set_size(-dim.size());
}
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
EXPECT_FALSE(reader.status().ok());
}
TEST(TensorSliceReaderTest, InvalidTensorSlice) {
const string fname =
io::JoinPath(testing::TmpDir(), "invalid_slice_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.mutable_slice(0)->mutable_extent(0)->set_length(-10);
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
EXPECT_FALSE(reader.status().ok());
}
TEST(TensorSliceReaderTest, MissingTensorData) {
const string fname =
io::JoinPath(testing::TmpDir(), "missing_data_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_ASSERT_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_ASSERT_OK(writer.Finish());
MutateSavedTensorSlices(fname, [&](SavedTensorSlices sts) {
if (sts.has_data()) {
Fill(data, 4, sts.mutable_data()->mutable_data());
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_ASSERT_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
void CachedTensorSliceReaderTesterHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
const TensorSliceReader::OpenTableFunction& open_function) {
const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint");
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const float data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const float data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
TensorSliceReaderCache cache;
const string filepattern = strings::StrCat(fname_base, "_*");
const TensorSliceReader* reader = cache.GetReader(
filepattern, open_function, TensorSliceReader::kLoadAllShards);
EXPECT_TRUE(reader != nullptr);
EXPECT_EQ(2, reader->num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader->HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DT_FLOAT, type);
EXPECT_FALSE(reader->HasTensor("don't exist", nullptr, nullptr));
}
const TensorSliceReader* reader2 = cache.GetReader(
filepattern, open_function, TensorSliceReader::kLoadAllShards);
EXPECT_EQ(reader, reader2);
reader = cache.GetReader("file_does_not_exist", open_function,
TensorSliceReader::kLoadAllShards);
EXPECT_TRUE(reader == nullptr);
}
TEST(CachedTensorSliceReaderTest, SimpleFloat) {
CachedTensorSliceReaderTesterHelper(CreateTableTensorSliceBuilder,
OpenTableTensorSliceReader);
}
static void VersionTest(const VersionDef& versions, const string& error) {
const string path = io::JoinPath(testing::TmpDir(), "checkpoint");
{
SavedTensorSlices sts;
*sts.mutable_meta()->mutable_versions() = versions;
string contents;
EXPECT_TRUE(sts.SerializeToString(&contents));
TensorSliceWriter::Builder* builder;
TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder));
builder->Add(kSavedTensorSlicesKey, contents);
int64_t file_size;
TF_EXPECT_OK(builder->Finish(&file_size));
delete builder;
}
TensorSliceReader reader(path, OpenTableTensorSliceReader);
EXPECT_TRUE(reader.status().code() == error::INVALID_ARGUMENT &&
absl::StartsWith(reader.status().message(), error))
<< "Expected error starting with '" << errors::InvalidArgument(error)
<< "', got '" << reader.status() << "'";
}
TEST(CheckpointVersionTest, MinConsumer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION + 1);
versions.set_min_consumer(TF_CHECKPOINT_VERSION + 1);
VersionTest(
versions,
strings::StrCat("Checkpoint min consumer version ",
TF_CHECKPOINT_VERSION + 1, " above current version ",
TF_CHECKPOINT_VERSION, " for TensorFlow"));
}
TEST(CheckpointVersionTest, MinProducer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1);
VersionTest(versions, strings::StrCat("Checkpoint producer version ",
TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1,
" below min producer ",
TF_CHECKPOINT_VERSION_MIN_PRODUCER,
" supported by TensorFlow"));
}
TEST(CheckpointVersionTest, BadConsumer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION + 1);
versions.add_bad_consumers(TF_CHECKPOINT_VERSION);
VersionTest(
versions,
strings::StrCat(
"Checkpoint disallows consumer version ", TF_CHECKPOINT_VERSION,
". Please upgrade TensorFlow: this version is likely buggy."));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_reader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
89492635-9860-4f0b-9197-230866aa6354 | cpp | tensorflow/tensorflow | equal_graph_def | tensorflow/core/util/equal_graph_def.cc | tensorflow/core/util/equal_graph_def_test.cc | #include "tensorflow/core/util/equal_graph_def.h"
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
bool EqualGraphDef(const GraphDef& actual, const GraphDef& expected,
string* diff, const EqualGraphDefOptions& options) {
return EqualRepeatedNodeDef(actual.node(), expected.node(), diff, options);
}
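// Illustrative usage sketch (not part of the original file): callers usually
// go through EqualGraphDef() and report `diff` on mismatch; the two GraphDefs
// below are assumed to be produced elsewhere by the caller:
//
//   string diff;
//   if (!EqualGraphDef(actual_gdef, expected_gdef, &diff)) {
//     LOG(ERROR) << "GraphDefs differ: " << diff;
//   }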
uint64 GraphDefHash(const GraphDef& gdef, const EqualGraphDefOptions& options) {
return RepeatedNodeDefHash(gdef.node(), options);
}
bool EqualRepeatedNodeDef(const protobuf::RepeatedPtrField<NodeDef>& actual,
const protobuf::RepeatedPtrField<NodeDef>& expected,
string* diff, const EqualGraphDefOptions& options) {
std::unordered_map<string, const NodeDef*> actual_index;
for (const NodeDef& node : actual) {
actual_index[node.name()] = &node;
}
for (const NodeDef& expected_node : expected) {
auto actual_iter = actual_index.find(expected_node.name());
if (actual_iter == actual_index.end()) {
if (diff != nullptr) {
*diff = strings::StrCat("Did not find expected node '",
SummarizeNodeDef(expected_node), "'");
}
return false;
}
if (!EqualNodeDef(*actual_iter->second, expected_node, diff, options)) {
return false;
}
actual_index.erase(actual_iter);
}
if (!actual_index.empty()) {
if (diff != nullptr) {
*diff =
strings::StrCat("Found unexpected node '",
SummarizeNodeDef(*actual_index.begin()->second), "'");
}
return false;
}
return true;
}
uint64 RepeatedNodeDefHash(const protobuf::RepeatedPtrField<NodeDef>& ndefs,
const EqualGraphDefOptions& options) {
uint64 h = 0xDECAFCAFFE;
std::map<string, const NodeDef*> nodes;
for (const NodeDef& node : ndefs) {
nodes[node.name()] = &node;
}
for (const auto& pair : nodes) {
h = Hash64(pair.first.data(), pair.first.size(), h);
h = Hash64Combine(NodeDefHash(*pair.second, options), h);
}
return h;
}
namespace {
string JoinStringField(const protobuf::RepeatedPtrField<string>& f) {
string ret;
for (int i = 0; i < f.size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, f.Get(i));
}
return ret;
}
}
bool EqualNodeDef(const NodeDef& actual, const NodeDef& expected, string* diff,
const EqualGraphDefOptions& options) {
if (actual.name() != expected.name()) {
if (diff != nullptr) {
*diff = strings::StrCat("Actual node name '", actual.name(),
"' is not expected '", expected.name(), "'");
}
return false;
}
if (actual.op() != expected.op()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has op '",
actual.op(), "' that is not expected '",
expected.op(), "'");
}
return false;
}
if (actual.device() != expected.device()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has device '",
actual.device(), "' that is not expected '",
expected.device(), "'");
}
return false;
}
if (actual.input_size() != expected.input_size()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has inputs '",
JoinStringField(actual.input()),
"' that don't match expected '",
JoinStringField(expected.input()), "'");
}
return false;
}
int first_control_input = actual.input_size();
for (int i = 0; i < actual.input_size(); ++i) {
if (absl::StartsWith(actual.input(i), "^")) {
first_control_input = i;
break;
}
if (actual.input(i) != expected.input(i) &&
actual.input(i) != strings::StrCat(expected.input(i), ":0") &&
strings::StrCat(actual.input(i), ":0") != expected.input(i)) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has input ",
i, " '", actual.input(i),
"' that doesn't match expected '",
expected.input(i), "'");
}
return false;
}
}
std::unordered_set<string> actual_control;
std::unordered_set<string> expected_control;
for (int i = first_control_input; i < actual.input_size(); ++i) {
actual_control.insert(actual.input(i));
expected_control.insert(expected.input(i));
}
for (const auto& e : expected_control) {
if (actual_control.erase(e) == 0) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' missing expected control input '", e, "'");
}
return false;
}
}
if (!actual_control.empty()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' has unexpected control input '",
*actual_control.begin(), "'");
}
return false;
}
std::unordered_set<string> actual_attr;
for (const auto& a : actual.attr()) {
if (options.ignore_internal_attrs && !a.first.empty() &&
a.first[0] == '_') {
continue;
}
actual_attr.insert(a.first);
}
for (const auto& e : expected.attr()) {
if (options.ignore_internal_attrs && !e.first.empty() &&
e.first[0] == '_') {
continue;
}
if (actual_attr.erase(e.first) == 0) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' missing expected attr '", e.first,
"' with value: ", SummarizeAttrValue(e.second));
}
return false;
}
auto iter = actual.attr().find(e.first);
if (!AreAttrValuesEqual(e.second, iter->second)) {
if (diff != nullptr) {
*diff = strings::StrCat(
"Node named '", actual.name(), "' has attr '", e.first,
"' with value: ", SummarizeAttrValue(iter->second),
" that does not match expected: ", SummarizeAttrValue(e.second));
}
return false;
}
}
if (!actual_attr.empty()) {
if (diff != nullptr) {
*diff = strings::StrCat(
"Node named '", actual.name(), "' has unexpected attr '",
*actual_attr.begin(), "' with value: ",
SummarizeAttrValue(actual.attr().find(*actual_attr.begin())->second));
}
return false;
}
return true;
}
uint64 NodeDefHash(const NodeDef& ndef, const EqualGraphDefOptions& options) {
uint64 h = Hash64(ndef.name());
h = Hash64(ndef.op().data(), ndef.op().size(), h);
h = Hash64(ndef.device().data(), ndef.device().size(), h);
int first_control_input = ndef.input_size();
for (int i = 0; i < ndef.input_size(); ++i) {
if (absl::StartsWith(ndef.input(i), "^")) {
first_control_input = i;
break;
}
h = Hash64(ndef.input(i).data(), ndef.input(i).size(), h);
}
std::set<string> ndef_control;
for (int i = first_control_input; i < ndef.input_size(); ++i) {
ndef_control.insert(ndef.input(i));
}
for (const string& s : ndef_control) {
h = Hash64(s.data(), s.size(), h);
}
std::map<string, AttrValue> ndef_attr;
for (const auto& a : ndef.attr()) {
if (options.ignore_internal_attrs && !a.first.empty() &&
a.first[0] == '_') {
continue;
}
ndef_attr[a.first] = a.second;
}
for (const auto& a : ndef_attr) {
h = Hash64(a.first.data(), a.first.size(), h);
h = Hash64Combine(AttrValueHash(a.second), h);
}
return h;
}
} | #include <utility>
#include "tensorflow/core/util/equal_graph_def.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
REGISTER_OP("Input").Output("o: float");
REGISTER_OP("Alternate").Output("o: float");
REGISTER_OP("Combine").Input("a: float").Input("b: float").Output("o: float");
Node* Input(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("Input", opts);
}
Node* Alternate(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("Alternate", opts);
}
Node* Combine(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("Combine", std::move(a), std::move(b), opts);
}
class EqualGraphDefTest : public ::testing::Test {
protected:
EqualGraphDefTest()
: e_(GraphDefBuilder::kFailImmediately),
a_(GraphDefBuilder::kFailImmediately) {}
bool Match() {
GraphDef expected;
TF_EXPECT_OK(e_.ToGraphDef(&expected));
GraphDef actual;
TF_EXPECT_OK(a_.ToGraphDef(&actual));
bool match = EqualGraphDef(actual, expected, &diff_);
if (match) {
EXPECT_EQ(GraphDefHash(expected), GraphDefHash(actual));
} else {
EXPECT_NE(GraphDefHash(expected), GraphDefHash(actual));
}
return match;
}
GraphDefBuilder e_;
GraphDefBuilder a_;
string diff_;
};
TEST_F(EqualGraphDefTest, Match) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("A"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, NoMatch) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("B"));
EXPECT_FALSE(Match());
EXPECT_EQ("Did not find expected node '{{node A}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, MissingNode) {
Input(e_.opts().WithName("A"));
Input(e_.opts().WithName("B"));
Input(a_.opts().WithName("A"));
EXPECT_FALSE(Match());
EXPECT_EQ("Did not find expected node '{{node B}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, ExtraNode) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("A"));
Input(a_.opts().WithName("B"));
EXPECT_FALSE(Match());
EXPECT_EQ("Found unexpected node '{{node B}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, NodeOrder) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, b, e_.opts().WithName("C"));
b = Input(a_.opts().WithName("B"));
a = Input(a_.opts().WithName("A"));
Combine(a, b, a_.opts().WithName("C"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, NameMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
EXPECT_FALSE(EqualNodeDef(a->def(), b->def(), &diff_));
EXPECT_EQ("Actual node name 'A' is not expected 'B'", diff_);
}
TEST_F(EqualGraphDefTest, OpMismatch) {
Input(e_.opts().WithName("A"));
Alternate(a_.opts().WithName("A"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'A' has op 'Alternate' that is not expected 'Input'",
diff_);
}
TEST_F(EqualGraphDefTest, DeviceMatch) {
Input(e_.opts().WithName("A").WithDevice("/cpu:0"));
Input(a_.opts().WithName("A").WithDevice("/cpu:0"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, DeviceMismatch) {
Input(e_.opts().WithName("A").WithDevice("/cpu:0"));
Input(a_.opts().WithName("A").WithDevice("/cpu:1"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'A' has device '/cpu:1' that is not expected '/cpu:0'",
diff_);
}
TEST_F(EqualGraphDefTest, InputMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, a, e_.opts().WithName("C"));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
Combine(b, b, a_.opts().WithName("C"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'C' has input 0 'B' that doesn't match expected 'A'",
diff_);
}
TEST_F(EqualGraphDefTest, InputOrderMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, b, e_.opts().WithName("C"));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
Combine(b, a, a_.opts().WithName("C"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'C' has input 0 'B' that doesn't match expected 'A'",
diff_);
}
TEST_F(EqualGraphDefTest, ControlInputOrder) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Node* d = Input(e_.opts().WithName("D"));
Combine(a, a,
e_.opts()
.WithName("E")
.WithControlInput(b)
.WithControlInput(c)
.WithControlInput(d));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
d = Input(a_.opts().WithName("D"));
Combine(a, a,
a_.opts()
.WithName("E")
.WithControlInput(c)
.WithControlInput(d)
.WithControlInput(b));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, ControlInputMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Node* d = Input(e_.opts().WithName("D"));
Combine(a, a,
e_.opts().WithName("E").WithControlInput(b).WithControlInput(c));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
d = Input(a_.opts().WithName("D"));
Combine(a, a,
a_.opts().WithName("E").WithControlInput(b).WithControlInput(d));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'E' missing expected control input '^C'", diff_);
}
TEST_F(EqualGraphDefTest, ControlInputAdded) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Combine(a, a, e_.opts().WithName("D").WithControlInput(b));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
Combine(a, a,
a_.opts().WithName("D").WithControlInput(b).WithControlInput(c));
EXPECT_FALSE(Match());
EXPECT_EQ(
"Node named 'D' has inputs 'A, A, ^B, ^C' that don't match "
"expected 'A, A, ^B'",
diff_);
}
TEST_F(EqualGraphDefTest, ControlInputRemoved) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Combine(a, a,
e_.opts().WithName("D").WithControlInput(b).WithControlInput(c));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
Combine(a, a, a_.opts().WithName("D").WithControlInput(b));
EXPECT_FALSE(Match());
EXPECT_EQ(
"Node named 'D' has inputs 'A, A, ^B' that don't match "
"expected 'A, A, ^B, ^C'",
diff_);
}
TEST_F(EqualGraphDefTest, Attr) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef same(a->def());
AddNodeAttr("foo", "bar", &same);
EXPECT_TRUE(EqualNodeDef(same, same, &diff_)) << diff_;
}
TEST_F(EqualGraphDefTest, AttrAdded) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
EXPECT_FALSE(EqualNodeDef(actual, a->def(), &diff_));
EXPECT_EQ("Node named 'A' has unexpected attr 'foo' with value: \"bar\"",
diff_);
}
TEST_F(EqualGraphDefTest, AttrRemoved) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef expected(a->def());
AddNodeAttr("foo", "bar", &expected);
EXPECT_FALSE(EqualNodeDef(a->def(), expected, &diff_));
EXPECT_EQ("Node named 'A' missing expected attr 'foo' with value: \"bar\"",
diff_);
}
TEST_F(EqualGraphDefTest, AttrOrder) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("baz", 42, &actual);
NodeDef expected(a->def());
AddNodeAttr("baz", 42, &expected);
AddNodeAttr("foo", "bar", &expected);
EXPECT_TRUE(EqualNodeDef(actual, expected, &diff_)) << diff_;
}
TEST_F(EqualGraphDefTest, AttrMismatch) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("baz", 5, &actual);
NodeDef expected(a->def());
AddNodeAttr("baz", 42, &expected);
AddNodeAttr("foo", "bar", &expected);
EXPECT_FALSE(EqualNodeDef(actual, expected, &diff_));
EXPECT_EQ(
"Node named 'A' has attr 'baz' with value: 5 that does not match "
"expected: 42",
diff_);
}
TEST_F(EqualGraphDefTest, IgnoreInternalAttrs) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("_class", 5, &actual);
NodeDef expected(a->def());
AddNodeAttr("foo", "bar", &expected);
AddNodeAttr("_kernel", "eigen", &actual);
EXPECT_TRUE(EqualNodeDef(actual, expected, &diff_));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/equal_graph_def.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/equal_graph_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3149689d-d850-4204-b1f7-f4154c122891 | cpp | tensorflow/tensorflow | proto_utils | tensorflow/core/util/proto/proto_utils.cc | tensorflow/core/util/proto/proto_utils_test.cc | #include "tensorflow/core/util/proto/proto_utils.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace proto_utils {
using tensorflow::protobuf::FieldDescriptor;
using tensorflow::protobuf::internal::WireFormatLite;
bool IsCompatibleType(FieldDescriptor::Type field_type, DataType dtype) {
switch (field_type) {
case WireFormatLite::TYPE_DOUBLE:
return dtype == tensorflow::DT_DOUBLE;
case WireFormatLite::TYPE_FLOAT:
return dtype == tensorflow::DT_FLOAT || dtype == tensorflow::DT_DOUBLE;
case WireFormatLite::TYPE_INT64:
return dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_UINT64:
return dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_INT32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_FIXED64:
return dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_FIXED32:
return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_BOOL:
return dtype == tensorflow::DT_BOOL;
case WireFormatLite::TYPE_STRING:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_GROUP:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_MESSAGE:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_BYTES:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_UINT32:
return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_ENUM:
return dtype == tensorflow::DT_INT32;
case WireFormatLite::TYPE_SFIXED32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SFIXED64:
return dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SINT32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SINT64:
return dtype == tensorflow::DT_INT64;
}
}
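// Illustrative usage sketch (not part of the original file): the predicate
// above allows widening integral matches, e.g. a TYPE_INT32 proto field may be
// decoded into either DT_INT32 or the wider DT_INT64:
//
//   bool a = IsCompatibleType(WireFormatLite::TYPE_INT32, DT_INT32);   // true
//   bool b = IsCompatibleType(WireFormatLite::TYPE_INT32, DT_INT64);   // true
//   bool c = IsCompatibleType(WireFormatLite::TYPE_INT32, DT_FLOAT);   // false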
Status ParseTextFormatFromString(absl::string_view input,
protobuf::Message* output) {
DCHECK(output != nullptr) << "output must be non NULL";
if (output == nullptr) {
LOG(ERROR) << "output must be non NULL";
return Status(absl::StatusCode::kInvalidArgument,
"output must be non NULL");
}
string err;
StringErrorCollector err_collector(&err, true);
protobuf::TextFormat::Parser parser;
parser.RecordErrorsTo(&err_collector);
if (!parser.ParseFromString(string(input), output)) {
return Status(absl::StatusCode::kInvalidArgument, err);
}
return absl::OkStatus();
}
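// By default reported locations use protobuf's 0-based line/column indexing;
// the two-argument constructor can switch to 1-based indexing.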
StringErrorCollector::StringErrorCollector(string* error_text)
: StringErrorCollector(error_text, false) {}
StringErrorCollector::StringErrorCollector(string* error_text,
bool one_indexing)
: error_text_(error_text), index_offset_(one_indexing ? 1 : 0) {
DCHECK(error_text_ != nullptr) << "error_text must be non NULL";
if (error_text_ == nullptr) {
LOG(ERROR) << "error_text must be non NULL";
}
}
void StringErrorCollector::AddError(int line, int column,
const string& message) {
if (error_text_ != nullptr) {
absl::SubstituteAndAppend(error_text_, "$0($1): $2\n", line + index_offset_,
column + index_offset_, message);
}
}
void StringErrorCollector::AddWarning(int line, int column,
const string& message) {
AddError(line, column, message);
}
}
} | #include "tensorflow/core/util/proto/proto_utils.h"
#include <gmock/gmock.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using proto_utils::ParseTextFormatFromString;
using proto_utils::StringErrorCollector;
using ::testing::ContainsRegex;
TEST(ParseTextFormatFromStringTest, Success) {
protobuf::DescriptorProto output;
TF_ASSERT_OK(ParseTextFormatFromString("name: \"foo\"", &output));
EXPECT_EQ(output.name(), "foo");
}
TEST(ParseTextFormatFromStringTest, ErrorOnInvalidSyntax) {
protobuf::DescriptorProto output;
Status status = ParseTextFormatFromString("name: foo", &output);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("foo"));
EXPECT_FALSE(output.has_name());
}
TEST(ParseTextFormatFromStringTest, ErrorOnUnknownFieldName) {
protobuf::DescriptorProto output;
Status status = ParseTextFormatFromString("badname: \"foo\"", &output);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("badname"));
EXPECT_FALSE(output.has_name());
}
TEST(ParseTextFormatFromStringTest, DiesOnNullOutputPointer) {
#ifndef NDEBUG
ASSERT_DEATH(ParseTextFormatFromString("foo", nullptr).IgnoreError(),
"output.*non NULL");
#else
Status status = ParseTextFormatFromString("foo", nullptr);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("output.*non NULL"));
#endif
}
TEST(StringErrorCollectorTest, AppendsError) {
string err;
StringErrorCollector collector(&err);
collector.AddError(1, 2, "foo");
EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsWarning) {
string err;
StringErrorCollector collector(&err);
collector.AddWarning(1, 2, "foo");
EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleError) {
string err;
StringErrorCollector collector(&err);
collector.AddError(1, 2, "foo");
collector.AddError(3, 4, "bar");
EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleWarning) {
string err;
StringErrorCollector collector(&err);
collector.AddWarning(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
TEST(StringErrorCollectorTest, OffsetWorks) {
string err;
StringErrorCollector collector(&err, true);
collector.AddError(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
EXPECT_EQ("2(3): foo\n4(5): bar\n", err);
}
TEST(StringErrorCollectorTest, DiesOnNullErrorText) {
#ifndef NDEBUG
ASSERT_DEATH(StringErrorCollector(nullptr), "error_text.*non NULL");
#else
StringErrorCollector collector(nullptr);
collector.AddError(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/proto_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/proto_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a665630-8cc7-44cb-a331-2ac303dc883c | cpp | tensorflow/tensorflow | descriptor_pool_registry | tensorflow/core/util/proto/descriptor_pool_registry.cc | tensorflow/core/util/proto/descriptor_pool_registry_test.cc | #include <string>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/proto/descriptor_pool_registry.h"
namespace tensorflow {
DescriptorPoolRegistry* DescriptorPoolRegistry::Global() {
static DescriptorPoolRegistry* registry = new DescriptorPoolRegistry;
return registry;
}
DescriptorPoolRegistry::DescriptorPoolFn* DescriptorPoolRegistry::Get(
const string& source) {
auto found = fns_.find(source);
if (found == fns_.end()) return nullptr;
return &found->second;
}
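// Registers `pool_fn` under `source`. Registering the same source twice is a
// fatal error (CHECK failure).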
void DescriptorPoolRegistry::Register(
const string& source,
const DescriptorPoolRegistry::DescriptorPoolFn& pool_fn) {
auto existing = Get(source);
CHECK_EQ(existing, nullptr)
<< "descriptor pool for source: " << source << " already registered";
fns_.insert(std::pair<const string&, DescriptorPoolFn>(source, pool_fn));
}
} | #include "tensorflow/core/util/proto/descriptor_pool_registry.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
struct Value {
static Status Function(
tensorflow::protobuf::DescriptorPool const** desc_pool,
std::unique_ptr<tensorflow::protobuf::DescriptorPool>* owned_desc_pool) {
return absl::OkStatus();
}
};
REGISTER_DESCRIPTOR_POOL("TEST POOL 1", Value::Function);
REGISTER_DESCRIPTOR_POOL("TEST POOL 2", Value::Function);
}
TEST(DescriptorPoolRegistryTest, TestBasic) {
EXPECT_EQ(DescriptorPoolRegistry::Global()->Get("NON-EXISTENT"), nullptr);
auto pool1 = DescriptorPoolRegistry::Global()->Get("TEST POOL 1");
EXPECT_NE(pool1, nullptr);
auto pool2 = DescriptorPoolRegistry::Global()->Get("TEST POOL 2");
EXPECT_NE(pool2, nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/descriptor_pool_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/proto/descriptor_pool_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f0bf610-f23a-4dd6-9bfb-f1d3f2b30cc9 | cpp | tensorflow/tensorflow | uniform_quant_ops_params | tensorflow/core/util/quantization/uniform_quant_ops_params.cc | tensorflow/core/util/quantization/uniform_quant_ops_params_test.cc | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
Status ValidDim(int64_t dims, int64_t dim) {
if (dim < 0 || dim >= dims) {
return InvalidArgument(
"Each dimension number must be in region [0, rank). Given rank ", dims,
" and dimension number value ", dim);
}
return absl::OkStatus();
}
Status ValidSpatialDimensions(
int64_t dims, const protobuf::RepeatedField<int64_t>& spatial_dimensions) {
if (spatial_dimensions.size() != dims - 2) {
return InvalidArgument(
"Spatial dimensions size must be rank - 2. Given rank ", dims,
" and spatial dimensions size ", spatial_dimensions.size());
}
for (int i = 0; i < spatial_dimensions.size(); ++i) {
TF_RETURN_IF_ERROR(ValidDim(dims, spatial_dimensions.Get(i)));
}
return absl::OkStatus();
}
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const OpKernelConstruction& context) {
return LoadFromAttrsInternal(context);
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const shape_inference::InferenceContext& context) {
return LoadFromAttrsInternal(context);
}
Status UniformQuantizedConvolutionParams::ValidateOrFillParamsAndValidateShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
if (lhs_shape.dims() != rhs_shape.dims()) {
return InvalidArgument(
"lhs and rhs must have same dims. Given lhs and rhs of shapes: ",
lhs_shape.DebugString(), rhs_shape.DebugString());
}
const int64_t dims = lhs_shape.dims();
if (dims <= 2) {
return InvalidArgument("lhs and rhs shape dims must be at least 3. Given: ",
dims);
}
const int64_t num_spatial_dims = dims - 2;
if (window_strides_.empty()) {
window_strides_.resize(num_spatial_dims, 1);
} else if (window_strides_.size() != num_spatial_dims) {
return InvalidArgument("Size of window_strides Attr must be dims - 2.");
} else if (!absl::c_all_of(window_strides_,
[](int stride) { return stride >= 1; })) {
return InvalidArgument(
"All elements of window_strides must be >= 1. Given ",
absl::StrJoin(window_strides_, ", "));
}
if (lhs_dilation_.empty()) {
lhs_dilation_.resize(num_spatial_dims, 1);
} else if (lhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of lhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(lhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of lhs_dilation must be >= 1. Given ",
absl::StrJoin(lhs_dilation_, ", "));
}
if (rhs_dilation_.empty()) {
rhs_dilation_.resize(num_spatial_dims, 1);
} else if (rhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of rhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(rhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of rhs_dilation must be >= 1. Given ",
absl::StrJoin(rhs_dilation_, ", "));
}
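  // If no dimension_numbers attr was provided, default to an NCHW-style
  // layout: batch, feature, then spatial dimensions for lhs, rhs, and output.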
if (dimension_numbers_.input_spatial_dimensions_size() == 0) {
dimension_numbers_.set_input_batch_dimension(0);
dimension_numbers_.set_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_input_spatial_dimensions(2 + i);
}
dimension_numbers_.set_kernel_output_feature_dimension(0);
dimension_numbers_.set_kernel_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_kernel_spatial_dimensions(2 + i);
}
dimension_numbers_.set_output_batch_dimension(0);
dimension_numbers_.set_output_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_output_spatial_dimensions(2 + i);
}
} else {
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_batch_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.input_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_input_feature_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.kernel_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.output_batch_dimension()));
TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.output_spatial_dimensions()));
}
if (feature_group_count_ <= 0) {
return InvalidArgument(
"feature_group_count must be a positive integer, given: ",
feature_group_count_);
}
const int64_t lhs_feature_count =
lhs_shape.dim_size(dimension_numbers_.input_feature_dimension());
if (lhs_feature_count % feature_group_count_) {
return InvalidArgument(
"feature_group_count must divide lhs feature dimension size, but ",
feature_group_count_, " does not divide ", lhs_feature_count);
}
const int64_t rhs_input_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_input_feature_dimension());
if (lhs_feature_count % rhs_input_feature_count) {
return InvalidArgument(
"rhs input feature dimension must divide lhs feature dimension "
"size, but ",
rhs_input_feature_count, " does not divide ", lhs_feature_count);
}
if (lhs_feature_count / feature_group_count_ != rhs_input_feature_count) {
return InvalidArgument(
"lhs feature dimension size divided by feature_group_count must equal "
"the rhs input feature dimension size, but ",
lhs_feature_count, " / ", feature_group_count_,
" != ", rhs_input_feature_count);
}
const int64_t rhs_output_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
if (rhs_output_feature_count % feature_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of feature_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ",
feature_group_count_);
}
if (batch_group_count_ <= 0) {
return InvalidArgument(
"batch_group_count Attr must be a positive integer. Given: ",
batch_group_count_);
}
const int64_t lhs_batch_count =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension());
if (lhs_batch_count % batch_group_count_) {
return InvalidArgument(
"batch_group_count must divide lhs batch dimension size, but ",
batch_group_count_, " does not divide ", lhs_batch_count);
}
if (rhs_output_feature_count % batch_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of batch_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ", batch_group_count_);
}
return ValidateOrFillPaddingList(lhs_shape, rhs_shape);
}
absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
std::vector<int64_t> output_shape_buf(lhs_shape.dims());
output_shape_buf[dimension_numbers_.output_batch_dimension()] =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) /
batch_group_count_;
output_shape_buf[dimension_numbers_.output_feature_dimension()] =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
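  // Each output spatial dimension is
  // ceil((padded dilated lhs size - dilated rhs size + 1) / stride).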
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) {
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size_numerator =
lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] -
rhs_size_dilated + 1;
const int64_t output_size_denominator = window_strides_[i];
output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] =
(output_size_numerator + output_size_denominator - 1) /
output_size_denominator;
}
TensorShape output_shape;
TF_RETURN_IF_ERROR(
TensorShape::BuildTensorShape(output_shape_buf, &output_shape));
return output_shape;
}
template <typename ContextT>
Status UniformQuantizedConvolutionParams::LoadFromAttrsInternal(
const ContextT& context) {
TF_RETURN_IF_ERROR(context.GetAttr("window_strides", &window_strides_));
TF_RETURN_IF_ERROR(context.GetAttr("lhs_dilation", &lhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("rhs_dilation", &rhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("batch_group_count", &batch_group_count_));
TF_RETURN_IF_ERROR(
context.GetAttr("feature_group_count", &feature_group_count_));
TF_RETURN_IF_ERROR(context.GetAttr("padding", &padding_));
TF_RETURN_IF_ERROR(context.GetAttr("explicit_padding", &padding_list_));
if (padding_ != "EXPLICIT" && padding_ != "SAME" && padding_ != "VALID") {
return InvalidArgument(
"padding Attr must be one of [EXPLICIT | SAME | VALID], but given: ",
padding_);
} else if (padding_ != "EXPLICIT" && !padding_list_.empty()) {
return InvalidArgument(
"If padding Attr is not 'EXPLICIT', explicit_padding Attr must be "
"empty. Given padding ",
padding_, " and explicit_padding of size ", padding_list_.size());
}
std::string dimension_numbers_str;
TF_RETURN_IF_ERROR(
context.GetAttr("dimension_numbers", &dimension_numbers_str));
if (dimension_numbers_str.empty()) {
dimension_numbers_.Clear();
} else if (!dimension_numbers_.ParseFromString(dimension_numbers_str)) {
return InvalidArgument("Error parsing convolution dimension numbers.");
}
return absl::OkStatus();
}
Status UniformQuantizedConvolutionParams::ValidateOrFillPaddingList(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
const int64_t dims = lhs_shape.dims();
const int64_t padding_list_size = 2 * (dims - 2);
if (padding_ == "EXPLICIT") {
if (padding_list_.size() != padding_list_size) {
return InvalidArgument(
"Size of explicit_padding Attr must be 2 * (rank - 2). Given rank ",
dims, " and explicit_padding of size ", padding_list_.size());
} else if (!absl::c_all_of(padding_list_,
[](int elem) { return elem >= 0; })) {
return InvalidArgument("All explicit_padding elems must be >= 0, Given ",
absl::StrJoin(padding_list_, ", "));
}
} else if (padding_ == "VALID") {
padding_list_.resize(padding_list_size, 0);
} else {
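    // padding_ == "SAME": pick per-dimension padding so the output size equals
    // ceil(dilated input size / stride); any odd remainder goes to the end.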
padding_list_.resize(padding_list_size);
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size();
++i) {
const int64_t stride = window_strides_[i];
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size = (lhs_size_dilated + stride - 1) / stride;
const int64_t total_padding = std::max(
(output_size - 1) * stride + rhs_size_dilated - lhs_size_dilated,
static_cast<int64_t>(0));
const int64_t padding_begin = total_padding / 2;
const int64_t padding_end = total_padding - padding_begin;
padding_list_[2 * i] = padding_begin;
padding_list_[2 * i + 1] = padding_end;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
namespace {
using protobuf::TextFormat;
using ::testing::ElementsAreArray;
TEST(UniformQuantizedConvolutionParamsTest, DilatedSize) {
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(0, 2), 0);
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(10, 3), 28);
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 2, 3, 4},
{3, 2, 2, 3}));
EXPECT_THAT(params.window_strides(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.lhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.rhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({0, 0, 0, 0}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({2, 3}));
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 3, 4, 2},
{2, 3, 1, 2}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({1, 2}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 3);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 2);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({0, 1}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({1, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 2, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
const TensorShape lhs_shape({2, 3, 4, 2});
const TensorShape rhs_shape({2, 3, 1, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateSameOptionPadding) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"SAME");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 4, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 2, 1, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/quantization/uniform_quant_ops_params.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/quantization/uniform_quant_ops_params_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2589f64c-eb8c-442a-8faa-a014e79a40f7 | cpp | tensorflow/tensorflow | conv_map_wrapper | tensorflow/core/util/autotune_maps/conv_map_wrapper.cc | tensorflow/core/util/autotune_maps/conv_map_wrapper_test.cc | #include "tensorflow/core/util/autotune_maps/conv_map_wrapper.h"
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
namespace tensorflow {
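// Rebuilds a wrapper from a serialized key entry and a serialized value
// entry, stitching them into a single ConvMapProto::Entry.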
absl::StatusOr<ConvMapWrapper> ConvMapWrapper::FromKeyAndValue(
OpaqueKey key, OpaqueValue value) {
ConvMapProto::Entry key_proto;
if (!key_proto.ParseFromString(key)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided key");
}
ConvMapProto::Entry value_proto;
if (!value_proto.ParseFromString(value)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided value");
}
ConvMapProto::Entry full_entry;
*full_entry.mutable_key() = key_proto.key();
*full_entry.mutable_value() = value_proto.value();
return ConvMapWrapper(full_entry);
}
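// Key() and Value() serialize deterministically so that equal entries always
// produce byte-identical opaque strings.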
ConvMapWrapper::OpaqueKey ConvMapWrapper::Key() const {
ConvMapProto::Entry entry;
*entry.mutable_key() = conv_map_entry_.key();
OpaqueKey serialized;
CHECK(tsl::SerializeToStringDeterministic(entry, &serialized));
return serialized;
}
ConvMapWrapper::OpaqueValue ConvMapWrapper::Value() const {
ConvMapProto::Entry entry;
*entry.mutable_value() = conv_map_entry_.value();
OpaqueValue serialized;
CHECK(tsl::SerializeToStringDeterministic(entry, &serialized));
return serialized;
}
std::vector<ConvMapWrapper> ConvMapWrapper::ConvMapToWrappers(
const ConvMapProto& autotune_results) {
std::vector<ConvMapWrapper> wrappers;
wrappers.reserve(autotune_results.kv_pairs_size());
for (const auto& entry : autotune_results.kv_pairs()) {
wrappers.push_back(ConvMapWrapper(entry));
}
return wrappers;
}
absl::StatusOr<ConvMapProto> ConvMapWrapper::ConvMapFromWrappers(
const std::vector<ConvMapWrapper>& wrappers) {
ConvMapProto conv_map_proto;
for (const auto& wrapper : wrappers) {
*conv_map_proto.add_kv_pairs() = wrapper.conv_map_entry_;
}
return conv_map_proto;
}
} | #include "tensorflow/core/util/autotune_maps/conv_map_wrapper.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/test.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
ConvMapProto ThreeConvMapEntries() {
ConvMapProto proto;
auto r1 = proto.add_kv_pairs();
r1->mutable_key()->set_batch(1);
r1->mutable_key()->set_in_depths(2);
r1->mutable_key()->set_out_depths(3);
r1->mutable_value()->mutable_algorithm()->set_algo_id(4);
auto r2 = proto.add_kv_pairs();
r2->mutable_key()->set_batch(5);
r2->mutable_key()->set_in_depths(6);
r2->mutable_key()->set_out_depths(7);
r2->mutable_value()->mutable_algorithm()->set_algo_id(8);
auto r3 = proto.add_kv_pairs();
r3->mutable_key()->set_batch(9);
r3->mutable_key()->set_in_depths(10);
r3->mutable_key()->set_out_depths(11);
r3->mutable_value()->mutable_algorithm()->set_algo_id(12);
return proto;
}
TEST(ConvMapWrapperTest, FullRoundTrip) {
std::vector<ConvMapWrapper> wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<std::pair<ConvMapWrapper::OpaqueKey, ConvMapWrapper::OpaqueValue>>
key_value_pairs;
for (const auto& wrapper : wrappers) {
key_value_pairs.emplace_back(wrapper.Key(), wrapper.Value());
}
std::vector<ConvMapWrapper> new_wrappers;
for (const auto& [key, value] : key_value_pairs) {
TF_ASSERT_OK_AND_ASSIGN(ConvMapWrapper wrapper,
ConvMapWrapper::FromKeyAndValue(key, value));
new_wrappers.push_back(wrapper);
}
TF_ASSERT_OK_AND_ASSIGN(ConvMapProto round_tripped,
ConvMapWrapper::ConvMapFromWrappers(new_wrappers));
EXPECT_EQ(round_tripped.kv_pairs_size(), 3);
EXPECT_EQ(round_tripped.kv_pairs(0).key().batch(), 1);
EXPECT_EQ(round_tripped.kv_pairs(0).key().in_depths(), 2);
EXPECT_EQ(round_tripped.kv_pairs(0).key().out_depths(), 3);
EXPECT_EQ(round_tripped.kv_pairs(0).value().algorithm().algo_id(), 4);
EXPECT_EQ(round_tripped.kv_pairs(1).key().batch(), 5);
EXPECT_EQ(round_tripped.kv_pairs(1).key().in_depths(), 6);
EXPECT_EQ(round_tripped.kv_pairs(1).key().out_depths(), 7);
EXPECT_EQ(round_tripped.kv_pairs(1).value().algorithm().algo_id(), 8);
EXPECT_EQ(round_tripped.kv_pairs(2).key().batch(), 9);
EXPECT_EQ(round_tripped.kv_pairs(2).key().in_depths(), 10);
EXPECT_EQ(round_tripped.kv_pairs(2).key().out_depths(), 11);
EXPECT_EQ(round_tripped.kv_pairs(2).value().algorithm().algo_id(), 12);
}
TEST(ConvMapWrapperTest, DeterministicSerialization) {
std::vector<ConvMapWrapper> wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<ConvMapWrapper::OpaqueKey> keys;
std::vector<ConvMapWrapper::OpaqueValue> values;
for (const auto& wrapper : wrappers) {
keys.push_back(wrapper.Key());
values.push_back(wrapper.Value());
}
const int kNumIterations = 100;
for (int i = 0; i < kNumIterations; ++i) {
std::vector<ConvMapWrapper> test_wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<ConvMapWrapper::OpaqueKey> test_keys;
std::vector<ConvMapWrapper::OpaqueValue> test_values;
for (const auto& test_wrapper : test_wrappers) {
test_keys.push_back(test_wrapper.Key());
test_values.push_back(test_wrapper.Value());
}
EXPECT_EQ(keys, test_keys);
EXPECT_EQ(values, test_values);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/conv_map_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/conv_map_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |