ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
600 | cpp | google/tensorstore | coalesce_kvstore | tensorstore/kvstore/ocdbt/io/coalesce_kvstore.cc | tensorstore/kvstore/ocdbt/io/coalesce_kvstore_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_IO_COALESCE_KVSTORE_H_
#define TENSORSTORE_KVSTORE_OCDBT_IO_COALESCE_KVSTORE_H_
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace internal_ocdbt {
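// Creates a kvstore driver that coalesces concurrent reads of the same key
// before forwarding them to `base`.  Byte-range requests separated by at most
// `threshold` bytes are merged into a single read; a non-zero
// `merged_threshold` limits the size of a merged request, and a non-zero
// `interval` delays dispatch so that additional requests can accumulate.
// Merged reads are completed on `executor`.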
kvstore::DriverPtr MakeCoalesceKvStoreDriver(kvstore::DriverPtr base,
size_t threshold,
size_t merged_threshold,
absl::Duration interval,
Executor executor);
}
}
#endif
#include "tensorstore/kvstore/ocdbt/io/coalesce_kvstore.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
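// Copies `cord` into a single newly allocated flat buffer so that the result
// does not keep the original cord's (possibly much larger) buffers alive.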
absl::Cord DeepCopyCord(const absl::Cord& cord) {
if (std::optional<absl::string_view> flat = cord.TryFlat();
flat.has_value()) {
return absl::Cord(*flat);
}
internal::FlatCordBuilder builder(cord.size(), false);
for (absl::string_view s : cord.Chunks()) {
builder.Append(s);
}
return std::move(builder).Build();
}
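// Deep-copies `cord` only when its estimated memory usage exceeds its logical
// size by more than 20%, as happens for small subcords of large flat cords.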
absl::Cord MaybeDeepCopyCord(absl::Cord cord) {
if (cord.EstimatedMemoryUsage() > (cord.size() * 1.2)) {
return DeepCopyCord(cord);
}
return cord;
}
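// Queue of read operations for a single key that are waiting to be coalesced
// into the next read issued to the base driver.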
struct PendingRead : public internal::AtomicReferenceCount<PendingRead> {
kvstore::Key key;
struct Op {
kvstore::ReadOptions options;
Promise<kvstore::ReadResult> promise;
};
std::vector<Op> pending_ops;
};
struct PendingReadEq {
using is_transparent = void;
inline bool operator()(const PendingRead& a, const PendingRead& b) const {
return a.key == b.key;
}
inline bool operator()(std::string_view a, std::string_view b) const {
return a == b;
}
inline bool operator()(const PendingRead& a, std::string_view b) const {
return a.key == b;
}
inline bool operator()(std::string_view a, const PendingRead& b) const {
return a == b.key;
}
inline bool operator()(std::string_view a,
const internal::IntrusivePtr<PendingRead>& b) const {
return b == nullptr ? false : PendingReadEq{}(a, *b);
}
inline bool operator()(const internal::IntrusivePtr<PendingRead>& a,
std::string_view b) const {
return a == nullptr ? false : PendingReadEq{}(*a, b);
}
inline bool operator()(const internal::IntrusivePtr<PendingRead>& a,
const internal::IntrusivePtr<PendingRead>& b) const {
return a->key == b->key;
}
};
struct PendingReadHash {
using is_transparent = void;
size_t operator()(std::string_view k) const { return absl::HashOf(k); }
size_t operator()(const internal::IntrusivePtr<PendingRead>& k) const {
return absl::HashOf(k->key);
}
};
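// Driver that forwards all operations to `base_`, except that reads of the
// same key are queued in `pending_` and coalesced before being issued.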
class CoalesceKvStoreDriver final : public kvstore::Driver {
public:
explicit CoalesceKvStoreDriver(kvstore::DriverPtr base, size_t threshold,
size_t merged_threshold,
absl::Duration interval, Executor executor)
: base_(std::move(base)),
threshold_(threshold),
merged_threshold_(merged_threshold),
interval_(interval),
thread_pool_executor_(std::move(executor)) {}
~CoalesceKvStoreDriver() override = default;
Future<ReadResult> Read(Key key, ReadOptions options = {}) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override {
return base_->Write(std::move(key), std::move(value), std::move(options));
}
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override {
return base_->ReadModifyWrite(transaction, phase, std::move(key), source);
}
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction,
KeyRange range) override {
return base_->TransactionalDeleteRange(transaction, std::move(range));
}
Future<const void> DeleteRange(KeyRange range) override {
return base_->DeleteRange(std::move(range));
}
void ListImpl(ListOptions options, ListReceiver receiver) override {
return base_->ListImpl(std::move(options), std::move(receiver));
}
std::string DescribeKey(std::string_view key) override {
return base_->DescribeKey(key);
}
Result<kvstore::DriverSpecPtr> GetBoundSpec() const override {
return base_->GetBoundSpec();
}
kvstore::SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return base_->GetSupportedFeatures(key_range);
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const override {
return base_->GarbageCollectionVisit(visitor);
}
void StartNextRead(internal::IntrusivePtr<PendingRead> state_ptr);
private:
kvstore::DriverPtr base_;
size_t threshold_;
size_t merged_threshold_;
absl::Duration interval_;
Executor thread_pool_executor_;
absl::Mutex mu_;
absl::flat_hash_set<internal::IntrusivePtr<PendingRead>, PendingReadHash,
PendingReadEq>
pending_ ABSL_GUARDED_BY(mu_);
};
Future<kvstore::ReadResult> CoalesceKvStoreDriver::Read(Key key,
ReadOptions options) {
internal::IntrusivePtr<PendingRead> state_ptr;
{
absl::MutexLock l(&mu_);
auto it = pending_.find(std::string_view(key));
if (it != pending_.end()) {
auto& state = *it;
auto op = PromiseFuturePair<ReadResult>::Make();
state->pending_ops.emplace_back(
PendingRead::Op{std::move(options), std::move(op.promise)});
return std::move(op.future);
} else {
state_ptr = internal::MakeIntrusivePtr<PendingRead>();
state_ptr->key = key;
bool inserted;
std::tie(it, inserted) = pending_.insert(state_ptr);
if (interval_ != absl::ZeroDuration()) {
internal::ScheduleAt(
absl::Now() + interval_,
[self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
state = std::move(state_ptr)] {
auto& executor = self->thread_pool_executor_;
executor([self = std::move(self), state = std::move(state)] {
self->StartNextRead(std::move(state));
});
});
auto& state = *it;
auto op = PromiseFuturePair<ReadResult>::Make();
state->pending_ops.emplace_back(
PendingRead::Op{std::move(options), std::move(op.promise)});
return std::move(op.future);
}
}
}
auto future = base_->Read(key, std::move(options));
future.ExecuteWhenReady(
[self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
state = std::move(state_ptr)](ReadyFuture<ReadResult>) {
auto& executor = self->thread_pool_executor_;
executor([self = std::move(self), state = std::move(state)] {
self->StartNextRead(std::move(state));
});
});
return future;
}
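// Describes a single merged read request: the combined options sent to the
// base driver and the byte range/promise of each coalesced subread.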
struct MergeValue {
kvstore::ReadOptions options;
struct Entry {
OptionalByteRangeRequest byte_range;
Promise<kvstore::ReadResult> promise;
};
std::vector<Entry> subreads;
};
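// Completes each coalesced subread by slicing its requested byte range out of
// the merged read result; errors and missing values are propagated as-is.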
void OnReadComplete(MergeValue merge_values,
ReadyFuture<kvstore::ReadResult> ready) {
if (!ready.result().ok() || !ready.value().has_value() ||
merge_values.subreads.size() == 1) {
for (const auto& e : merge_values.subreads) {
e.promise.SetResult(ready.result());
}
} else {
kvstore::ReadResult result = ready.value();
absl::Cord value = std::move(result.value);
for (const auto& e : merge_values.subreads) {
size_t request_start, request_size;
if (e.byte_range.inclusive_min < 0) {
request_start = value.size() + e.byte_range.inclusive_min;
} else {
request_start = e.byte_range.inclusive_min -
merge_values.options.byte_range.inclusive_min;
}
if (e.byte_range.exclusive_max == -1) {
request_size = std::numeric_limits<size_t>::max();
} else {
request_size = e.byte_range.exclusive_max - e.byte_range.inclusive_min;
}
result.value =
MaybeDeepCopyCord(value.Subcord(request_start, request_size));
e.promise.SetResult(result);
}
}
}
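// Drains the operations queued for `state_ptr->key`, orders them by
// generation conditions and byte range, merges compatible adjacent ranges
// (subject to `threshold_` and `merged_threshold_`), and issues the merged
// reads to the base driver.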
void CoalesceKvStoreDriver::StartNextRead(
internal::IntrusivePtr<PendingRead> state_ptr) {
std::vector<PendingRead::Op> pending;
{
absl::MutexLock l(&mu_);
if (state_ptr->pending_ops.empty()) {
pending_.erase(state_ptr->key);
return;
} else {
std::swap(pending, state_ptr->pending_ops);
}
}
if (interval_ != absl::ZeroDuration()) {
internal::ScheduleAt(
absl::Now() + interval_,
[self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
state = state_ptr] {
auto& executor = self->thread_pool_executor_;
executor([self = std::move(self), state = std::move(state)] {
self->StartNextRead(std::move(state));
});
});
}
std::sort(pending.begin(), pending.end(), [](const auto& a, const auto& b) {
return std::tie(a.options.generation_conditions.if_equal.value,
a.options.generation_conditions.if_not_equal.value,
a.options.byte_range.inclusive_min,
a.options.byte_range.exclusive_max) <
std::tie(b.options.generation_conditions.if_equal.value,
b.options.generation_conditions.if_not_equal.value,
b.options.byte_range.inclusive_min,
b.options.byte_range.exclusive_max);
});
kvstore::Key key = state_ptr->key;
MergeValue merged;
const auto& first_pending = pending.front();
merged.options = first_pending.options;
merged.subreads.emplace_back(
MergeValue::Entry{std::move(first_pending.options.byte_range),
std::move(first_pending.promise)});
for (size_t i = 1; i < pending.size(); ++i) {
auto& e = pending[i];
if (e.options.generation_conditions.if_equal !=
merged.options.generation_conditions.if_equal ||
e.options.generation_conditions.if_not_equal !=
merged.options.generation_conditions.if_not_equal ||
(e.options.byte_range.inclusive_min < 0) !=
(merged.options.byte_range.inclusive_min < 0)) {
assert(!merged.subreads.empty());
auto f = base_->Read(key, merged.options);
f.ExecuteWhenReady(
[merged = std::move(merged)](ReadyFuture<kvstore::ReadResult> ready) {
OnReadComplete(std::move(merged), std::move(ready));
});
merged = MergeValue{};
merged.options = e.options;
} else if (merged.options.byte_range.exclusive_max != -1 &&
((e.options.byte_range.inclusive_min -
merged.options.byte_range.exclusive_max >
threshold_) ||
(merged_threshold_ > 0 &&
merged.options.byte_range.size() > merged_threshold_))) {
assert(!merged.subreads.empty());
auto f = base_->Read(key, merged.options);
f.ExecuteWhenReady(
[merged = std::move(merged)](ReadyFuture<kvstore::ReadResult> ready) {
OnReadComplete(std::move(merged), std::move(ready));
});
merged = MergeValue{};
merged.options = e.options;
} else {
merged.options.staleness_bound =
std::max(merged.options.staleness_bound, e.options.staleness_bound);
merged.options.byte_range.inclusive_min =
std::min(merged.options.byte_range.inclusive_min,
e.options.byte_range.inclusive_min);
if (merged.options.byte_range.exclusive_max != -1) {
if (e.options.byte_range.exclusive_max != -1) {
merged.options.byte_range.exclusive_max =
std::max(merged.options.byte_range.exclusive_max,
e.options.byte_range.exclusive_max);
} else {
merged.options.byte_range.exclusive_max = -1;
}
}
}
merged.subreads.emplace_back(MergeValue::Entry{
std::move(e.options.byte_range), std::move(e.promise)});
}
assert(!merged.subreads.empty());
auto f = base_->Read(key, merged.options);
f.ExecuteWhenReady(
[self = internal::IntrusivePtr<CoalesceKvStoreDriver>(this),
merged = std::move(merged),
state = std::move(state_ptr)](ReadyFuture<kvstore::ReadResult> ready) {
auto& executor = self->thread_pool_executor_;
executor([self = std::move(self), merged = std::move(merged),
state = std::move(state), ready = std::move(ready)] {
OnReadComplete(std::move(merged), std::move(ready));
if (self->interval_ == absl::ZeroDuration()) {
self->StartNextRead(std::move(state));
}
});
});
}
}
kvstore::DriverPtr MakeCoalesceKvStoreDriver(kvstore::DriverPtr base,
size_t threshold,
size_t merged_threshold,
absl::Duration interval,
Executor executor) {
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coalescing reads with threshold: " << threshold
<< ", merged_threshold: " << merged_threshold
<< ", interval: " << interval;
return internal::MakeIntrusivePtr<CoalesceKvStoreDriver>(
std::move(base), threshold, merged_threshold, interval,
std::move(executor));
}
}
} | #include "tensorstore/kvstore/ocdbt/io/coalesce_kvstore.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::MakeCoalesceKvStoreDriver;
using ::tensorstore::kvstore::ReadOptions;
TEST(CoalesceKvstoreTest, SimpleRead) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 100, 0,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future = kvstore::Write(coalesce_driver, "a", absl::Cord("a"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
auto read_future = kvstore::Read(coalesce_driver, "a");
read_future.Force();
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ASSERT_TRUE(read_future.result().has_value());
ASSERT_TRUE(read_future.result().value().has_value());
EXPECT_EQ(read_future.result().value().value, absl::Cord("a"));
}
TEST(CoalesceKvstoreTest, ReadWithThreshold) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 0,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4;
ro1.byte_range =
OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range =
OptionalByteRangeRequest(7, 8);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, ro1.byte_range);
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(2, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(7, 8));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("7"));
}
TEST(CoalesceKvstoreTest, ReadWithMergedThreshold) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 2,
absl::ZeroDuration(),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4, ro5;
ro1.byte_range =
OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range = OptionalByteRangeRequest(6, 7);
ro5.byte_range = OptionalByteRangeRequest(8, 9);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
auto read_future5 = kvstore::Read(coalesce_driver, "a", ro5);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, ro1.byte_range);
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(2, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(6, 9));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("6"));
TENSORSTORE_EXPECT_OK(read_future5.result());
EXPECT_EQ(read_future5.result().value().value, absl::Cord("8"));
}
TEST(CoalesceKvstoreTest, ReadWithInterval) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
auto mock_key_value_store = MockKeyValueStore::Make();
auto coalesce_driver = MakeCoalesceKvStoreDriver(
mock_key_value_store, 1, 0,
absl::Milliseconds(10),
tensorstore::internal::DetachedThreadPool(1));
auto write_future =
kvstore::Write(coalesce_driver, "a", absl::Cord("0123456789"));
write_future.Force();
{
auto req = mock_key_value_store->write_requests.pop();
EXPECT_EQ("a", req.key);
req(base_store.driver);
}
ReadOptions ro1, ro2, ro3, ro4;
ro1.byte_range = OptionalByteRangeRequest(0, 1);
ro2.byte_range = OptionalByteRangeRequest(2, 3);
ro3.byte_range = OptionalByteRangeRequest(4, 5);
ro4.byte_range =
OptionalByteRangeRequest(7, 8);
auto read_future1 = kvstore::Read(coalesce_driver, "a", ro1);
auto read_future2 = kvstore::Read(coalesce_driver, "a", ro2);
auto read_future3 = kvstore::Read(coalesce_driver, "a", ro3);
auto read_future4 = kvstore::Read(coalesce_driver, "a", ro4);
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(0, 5));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future1.result());
EXPECT_EQ(read_future1.result().value().value, absl::Cord("0"));
TENSORSTORE_EXPECT_OK(read_future2.result());
EXPECT_EQ(read_future2.result().value().value, absl::Cord("2"));
TENSORSTORE_EXPECT_OK(read_future3.result());
EXPECT_EQ(read_future3.result().value().value, absl::Cord("4"));
{
auto req = mock_key_value_store->read_requests.pop();
EXPECT_EQ("a", req.key);
EXPECT_EQ(req.options.byte_range, OptionalByteRangeRequest(7, 8));
req(base_store.driver);
}
TENSORSTORE_EXPECT_OK(read_future4.result());
EXPECT_EQ(read_future4.result().value().value, absl::Cord("7"));
}
} |
601 | cpp | google/tensorstore | indirect_data_writer | tensorstore/kvstore/ocdbt/io/indirect_data_writer.cc | tensorstore/kvstore/ocdbt/io/indirect_data_writer_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_IO_INDIRECT_DATA_WRITER_H_
#define TENSORSTORE_KVSTORE_OCDBT_IO_INDIRECT_DATA_WRITER_H_
#include <stddef.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_ocdbt {
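// Writer that appends small blocks of data to an in-memory buffer and flushes
// the buffer as a single value (data file) to the underlying kvstore, handing
// back indirect references (file id, offset, length) to each buffered block.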
class IndirectDataWriter;
using IndirectDataWriterPtr = internal::IntrusivePtr<IndirectDataWriter>;
void intrusive_ptr_increment(IndirectDataWriter* p);
void intrusive_ptr_decrement(IndirectDataWriter* p);
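// Creates a writer that stores buffered data in `kvstore` under keys derived
// from `prefix`, flushing whenever the buffer reaches `target_size` bytes;
// with a `target_size` of 0, a flush happens only when a returned future is
// forced.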
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
std::string prefix,
size_t target_size);
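// Appends `data` to the writer's buffer and sets `ref` to the location
// (file id, offset, length) at which it will be stored; the returned future
// becomes ready once the data has been written to the kvstore.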
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
IndirectDataReference& ref);
}
}
#endif
#include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <stddef.h>
#include <cassert>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
auto& indirect_data_writer_histogram =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/kvstore/ocdbt/indirect_data_write_size",
"Histogram of OCDBT buffered write sizes.");
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
}
class IndirectDataWriter
: public internal::AtomicReferenceCount<IndirectDataWriter> {
public:
explicit IndirectDataWriter(kvstore::KvStore kvstore, std::string prefix,
size_t target_size)
: kvstore_(std::move(kvstore)),
prefix_(std::move(prefix)),
target_size_(target_size) {}
kvstore::KvStore kvstore_;
std::string prefix_;
size_t target_size_;
absl::Mutex mutex_;
size_t in_flight_ = 0;
bool flush_requested_ = false;
absl::Cord buffer_;
Promise<void> promise_;
DataFileId data_file_id_;
};
void intrusive_ptr_increment(IndirectDataWriter* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
void intrusive_ptr_decrement(IndirectDataWriter* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
namespace {
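// Flushes the buffer to the kvstore if it has reached the target size, or if
// a flush was requested and no write is currently in flight; once the write
// completes, checks again whether another flush is needed.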
void MaybeFlush(IndirectDataWriter& self, UniqueWriterLock<absl::Mutex> lock) {
bool buffer_at_target =
self.target_size_ > 0 && self.buffer_.size() >= self.target_size_;
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "MaybeFlush: flush_requested=" << self.flush_requested_
<< ", in_flight=" << self.in_flight_
<< ", buffer_at_target=" << buffer_at_target;
if (buffer_at_target) {
} else if (!self.flush_requested_ || self.in_flight_ > 0) {
return;
}
self.in_flight_++;
self.flush_requested_ = false;
Promise<void> promise = std::exchange(self.promise_, {});
absl::Cord buffer = std::exchange(self.buffer_, {});
DataFileId data_file_id = self.data_file_id_;
lock.unlock();
indirect_data_writer_histogram.Observe(buffer.size());
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Flushing " << buffer.size() << " bytes to " << data_file_id;
auto write_future =
kvstore::Write(self.kvstore_, data_file_id.FullPath(), std::move(buffer));
write_future.Force();
write_future.ExecuteWhenReady(
[promise = std::move(promise), data_file_id = std::move(data_file_id),
self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
ReadyFuture<TimestampedStorageGeneration> future) {
auto& r = future.result();
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Done flushing data to " << data_file_id << ": " << r.status();
if (!r.ok()) {
promise.SetResult(r.status());
} else if (StorageGeneration::IsUnknown(r->generation)) {
promise.SetResult(absl::UnavailableError("Non-unique file id"));
} else {
promise.SetResult(absl::OkStatus());
}
UniqueWriterLock lock{self->mutex_};
assert(self->in_flight_ > 0);
self->in_flight_--;
MaybeFlush(*self, std::move(lock));
});
}
}
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
IndirectDataReference& ref) {
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Write indirect data: size=" << data.size();
if (data.empty()) {
ref.file_id = DataFileId{};
ref.offset = 0;
ref.length = 0;
return absl::OkStatus();
}
UniqueWriterLock lock{self.mutex_};
Future<const void> future;
if (self.promise_.null() || (future = self.promise_.future()).null()) {
self.data_file_id_ = GenerateDataFileId(self.prefix_);
auto p = PromiseFuturePair<void>::Make();
self.promise_ = std::move(p.promise);
future = std::move(p.future);
self.promise_.ExecuteWhenForced(
[self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
Promise<void> promise) {
ABSL_LOG_IF(INFO, ocdbt_logging) << "Force called";
UniqueWriterLock lock{self->mutex_};
if (!HaveSameSharedState(promise, self->promise_)) return;
self->flush_requested_ = true;
MaybeFlush(*self, std::move(lock));
});
}
ref.file_id = self.data_file_id_;
ref.offset = self.buffer_.size();
ref.length = data.size();
self.buffer_.Append(std::move(data));
if (self.target_size_ > 0 && self.buffer_.size() >= self.target_size_) {
MaybeFlush(self, std::move(lock));
}
return future;
}
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
std::string prefix,
size_t target_size) {
return internal::MakeIntrusivePtr<IndirectDataWriter>(
std::move(kvstore), std::move(prefix), target_size);
}
}
} | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Future;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::MakeIndirectDataWriter;
using ::tensorstore::internal_ocdbt::Write;
namespace {
absl::Cord GetCord(size_t size) {
FlatCordBuilder cord_builder(size);
memset(cord_builder.data(), 0x37, cord_builder.size());
return std::move(cord_builder).Build();
}
template <typename T>
std::vector<std::string> ListEntriesToFiles(T& entries) {
std::vector<std::string> files;
files.reserve(entries.size());
for (auto& e : entries) {
files.push_back(std::move(e.key));
}
std::sort(files.begin(), files.end());
return files;
}
TEST(IndirectDataWriter, UnlimitedSize) {
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", 0);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Eq(2)));
while (!mock_key_value_store->write_requests.empty()) {
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Eq(1));
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(2));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
TEST(IndirectDataWriter, LimitedSize) {
constexpr size_t kTargetSize = 1024;
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", kTargetSize);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
EXPECT_THAT(ref.offset, testing::Le(kTargetSize));
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Ge(250)));
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Gt(1));
while (!mock_key_value_store->write_requests.empty()) {
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(refs.size()));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
} |
602 | cpp | google/tensorstore | btree | tensorstore/kvstore/ocdbt/format/btree.cc | tensorstore/kvstore/ocdbt/format/btree_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_FORMAT_BTREE_H_
#define TENSORSTORE_KVSTORE_OCDBT_FORMAT_BTREE_H_
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_ocdbt {
using KeyLength = uint16_t;
constexpr KeyLength kMaxKeyLength = 65535;
struct BtreeNodeStatistics {
uint64_t num_indirect_value_bytes;
uint64_t num_tree_bytes;
uint64_t num_keys;
BtreeNodeStatistics& operator+=(const BtreeNodeStatistics& other);
friend bool operator==(const BtreeNodeStatistics& a,
const BtreeNodeStatistics& b);
friend bool operator!=(const BtreeNodeStatistics& a,
const BtreeNodeStatistics& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os,
const BtreeNodeStatistics& x);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.num_indirect_value_bytes, x.num_tree_bytes, x.num_keys);
};
};
struct BtreeNodeReference {
IndirectDataReference location;
BtreeNodeStatistics statistics;
friend bool operator==(const BtreeNodeReference& a,
const BtreeNodeReference& b);
friend bool operator!=(const BtreeNodeReference& a,
const BtreeNodeReference& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os,
const BtreeNodeReference& x);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.location, x.statistics);
};
};
using BtreeNodeHeight = uint8_t;
using LeafNodeValueReference = std::variant<absl::Cord, IndirectDataReference>;
using LeafNodeValueKind = uint8_t;
constexpr LeafNodeValueKind kInlineValue = 0;
constexpr LeafNodeValueKind kOutOfLineValue = 1;
struct LeafNodeEntry {
std::string_view key;
LeafNodeValueReference value_reference;
uint64_t value_size() const {
struct LeafNodeSizeVisitor {
uint64_t operator()(const absl::Cord& direct) const {
return direct.size();
}
uint64_t operator()(const IndirectDataReference& ref) const {
return ref.length;
}
};
return std::visit(LeafNodeSizeVisitor{}, value_reference);
}
friend bool operator==(const LeafNodeEntry& a, const LeafNodeEntry& b);
friend bool operator!=(const LeafNodeEntry& a, const LeafNodeEntry& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, const LeafNodeEntry& e);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.key, x.value_reference);
};
};
template <typename Key>
struct InteriorNodeEntryData {
static_assert(std::is_same_v<Key, std::string> ||
std::is_same_v<Key, std::string_view>);
Key key;
KeyLength subtree_common_prefix_length;
std::string_view key_suffix() const {
return std::string_view(key).substr(subtree_common_prefix_length);
}
BtreeNodeReference node;
friend bool operator==(const InteriorNodeEntryData& a,
const InteriorNodeEntryData& b) {
return a.key == b.key &&
a.subtree_common_prefix_length == b.subtree_common_prefix_length &&
a.node == b.node;
}
friend bool operator!=(const InteriorNodeEntryData& a,
const InteriorNodeEntryData& b) {
return !(a == b);
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.key, x.subtree_common_prefix_length, x.node);
};
};
struct InteriorNodeEntry : public InteriorNodeEntryData<std::string_view> {
friend std::ostream& operator<<(std::ostream& os, const InteriorNodeEntry& e);
};
struct BtreeNode {
BtreeNodeHeight height;
std::string_view key_prefix;
using LeafNodeEntries = std::vector<LeafNodeEntry>;
using InteriorNodeEntries = std::vector<InteriorNodeEntry>;
using Entries = std::variant<LeafNodeEntries, InteriorNodeEntries>;
Entries entries;
struct KeyBuffer {
KeyBuffer() = default;
KeyBuffer(size_t size) : data(new char[size]), size(size) {}
std::shared_ptr<char[]> data;
size_t size = 0;
};
KeyBuffer key_buffer;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.height, x.key_prefix, x.entries, x.key_buffer);
};
};
constexpr size_t kLeafNodeFixedSize = 8 + 8;
constexpr size_t kInteriorNodeFixedSize =
    8 + 8 + 8 + 8 + sizeof(BtreeNodeStatistics);
inline size_t GetLeafNodeDataSize(const LeafNodeEntry& entry) {
if (auto* value = std::get_if<absl::Cord>(&entry.value_reference)) {
return value->size();
} else {
auto& ref = std::get<IndirectDataReference>(entry.value_reference);
return 8 + 8 + ref.file_id.size();
}
}
inline size_t EstimateDecodedEntrySizeExcludingKey(const LeafNodeEntry& entry) {
return kLeafNodeFixedSize + GetLeafNodeDataSize(entry);
}
inline size_t EstimateDecodedEntrySizeExcludingKey(
const InteriorNodeEntry& entry) {
return kInteriorNodeFixedSize + entry.node.location.file_id.size();
}
absl::Status ValidateBtreeNodeReference(const BtreeNode& node,
BtreeNodeHeight height,
std::string_view inclusive_min_key);
Result<BtreeNode> DecodeBtreeNode(const absl::Cord& encoded,
const BasePath& base_path);
struct ComparePrefixedKeyToUnprefixedKey {
std::string_view prefix;
int operator()(std::string_view prefixed, std::string_view unprefixed) const {
auto unprefixed_prefix =
unprefixed.substr(0, std::min(unprefixed.size(), prefix.size()));
int c = prefix.compare(unprefixed_prefix);
if (c != 0) return c;
return prefixed.compare(unprefixed.substr(prefix.size()));
}
};
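// Lookup helpers over the sorted entry array of a decoded b-tree node:
// exact-match lookup, lower bound, and [inclusive_min, exclusive_max) range
// queries for both leaf and interior entries.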
const LeafNodeEntry* FindBtreeEntry(span<const LeafNodeEntry> entries,
std::string_view key);
const LeafNodeEntry* FindBtreeEntryLowerBound(span<const LeafNodeEntry> entries,
std::string_view inclusive_min);
span<const LeafNodeEntry> FindBtreeEntryRange(span<const LeafNodeEntry> entries,
std::string_view inclusive_min,
std::string_view exclusive_max);
const InteriorNodeEntry* FindBtreeEntry(span<const InteriorNodeEntry> entries,
std::string_view key);
const InteriorNodeEntry* FindBtreeEntryLowerBound(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min);
span<const InteriorNodeEntry> FindBtreeEntryRange(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min,
std::string_view exclusive_max);
#ifndef NDEBUG
void CheckBtreeNodeInvariants(const BtreeNode& node);
#endif
}
namespace internal {
template <>
struct HeapUsageEstimator<internal_ocdbt::BtreeNode::KeyBuffer> {
static size_t EstimateHeapUsage(
const internal_ocdbt::BtreeNode::KeyBuffer& key_buffer,
size_t max_depth) {
return key_buffer.size;
}
};
}
}
#endif
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/reader.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/kvstore/ocdbt/format/btree_codec.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference_codec.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
bool ReadKeyPrefixLengths(riegeli::Reader& reader,
span<KeyLength> prefix_lengths,
KeyLength& common_prefix_length) {
KeyLength min_prefix_length = kMaxKeyLength;
for (auto& prefix_length : prefix_lengths) {
if (!KeyLengthCodec{}(reader, prefix_length)) return false;
min_prefix_length = std::min(min_prefix_length, prefix_length);
}
common_prefix_length = min_prefix_length;
return true;
}
bool ReadKeySuffixLengths(riegeli::Reader& reader,
span<KeyLength> suffix_lengths) {
for (auto& length : suffix_lengths) {
if (!KeyLengthCodec{}(reader, length)) return false;
}
return true;
}
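// Decodes the prefix-compressed keys of a node: each key is stored as the
// length of the prefix it shares with the previous key plus a new suffix.
// The prefix common to all keys becomes `common_prefix`, and the remaining
// key bytes are materialized into `key_buffer`.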
template <typename Entry>
bool ReadKeys(riegeli::Reader& reader, std::string_view& common_prefix,
BtreeNode::KeyBuffer& key_buffer, span<Entry> entries) {
const size_t num_entries = entries.size();
KeyLength common_prefix_length;
std::vector<KeyLength> key_length_buffer(num_entries * 2);
span<KeyLength> prefix_lengths(key_length_buffer.data(), num_entries);
span<KeyLength> suffix_lengths(key_length_buffer.data() + num_entries,
num_entries);
if (!ReadKeyPrefixLengths(reader, prefix_lengths.subspan(1),
common_prefix_length)) {
return false;
}
if (!ReadKeySuffixLengths(reader, suffix_lengths)) return false;
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
for (auto& entry : entries) {
if (!KeyLengthCodec{}(reader, entry.subtree_common_prefix_length)) {
return false;
}
common_prefix_length =
std::min(common_prefix_length, entry.subtree_common_prefix_length);
}
}
common_prefix_length = std::min(suffix_lengths[0], common_prefix_length);
size_t key_buffer_size = common_prefix_length;
for (size_t i = 0, prev_length = 0; i < num_entries; ++i) {
size_t prefix_length = prefix_lengths[i];
if (prefix_length > prev_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"Child %d: Prefix length of %d exceeds previous key length %d", i,
prefix_length, prev_length)));
return false;
}
size_t suffix_length = suffix_lengths[i];
size_t key_length = prefix_length + suffix_length;
if (key_length > kMaxKeyLength) {
reader.Fail(absl::DataLossError(
absl::StrFormat("Child %d: Key length %d exceeds limit of %d", i,
key_length, kMaxKeyLength)));
return false;
}
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
auto& entry = entries[i];
if (entry.subtree_common_prefix_length > key_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"Key %d: subtree common prefix length of %d exceeds key length of "
"%d",
i, entry.subtree_common_prefix_length, key_length)));
return false;
}
assert(entry.subtree_common_prefix_length >= common_prefix_length);
entry.subtree_common_prefix_length -= common_prefix_length;
}
prev_length = key_length;
key_buffer_size += key_length - common_prefix_length;
}
key_buffer = BtreeNode::KeyBuffer(key_buffer_size);
char* key_buffer_ptr = key_buffer.data.get();
const auto append_key_data = [&](auto... parts) {
std::string_view s(key_buffer_ptr, (parts.size() + ...));
(static_cast<void>(std::memcpy(key_buffer_ptr, parts.data(), parts.size()),
key_buffer_ptr += parts.size()),
...);
return s;
};
{
size_t key_length = suffix_lengths[0];
if (!reader.Pull(key_length)) return false;
auto full_first_key =
append_key_data(std::string_view(reader.cursor(), key_length));
common_prefix = full_first_key.substr(0, common_prefix_length);
entries[0].key = full_first_key.substr(common_prefix_length);
reader.move_cursor(key_length);
}
for (size_t i = 1; i < num_entries; ++i) {
size_t prefix_length = prefix_lengths[i] - common_prefix_length;
size_t suffix_length = suffix_lengths[i];
if (!reader.Pull(suffix_length)) return false;
auto prev_key = std::string_view(entries[i - 1].key);
auto suffix = std::string_view(reader.cursor(), suffix_length);
if (prev_key.substr(prefix_length) >= suffix) {
reader.Fail(absl::DataLossError("Invalid key order"));
return false;
}
entries[i].key = append_key_data(prev_key.substr(0, prefix_length), suffix);
reader.move_cursor(suffix_length);
}
return true;
}
template <typename Entry>
bool ReadBtreeNodeEntries(riegeli::Reader& reader,
const DataFileTable& data_file_table,
uint64_t num_entries, BtreeNode& node) {
auto& entries = node.entries.emplace<std::vector<Entry>>();
entries.resize(num_entries);
if (!ReadKeys<Entry>(reader, node.key_prefix, node.key_buffer, entries)) {
return false;
}
if constexpr (std::is_same_v<Entry, InteriorNodeEntry>) {
return BtreeNodeReferenceArrayCodec{data_file_table,
[](auto& entry) -> decltype(auto) {
return (entry.node);
}}(reader, entries);
} else {
return LeafNodeValueReferenceArrayCodec{data_file_table,
[](auto& entry) -> decltype(auto) {
return (entry.value_reference);
}}(reader, entries);
}
}
}
Result<BtreeNode> DecodeBtreeNode(const absl::Cord& encoded,
const BasePath& base_path) {
BtreeNode node;
auto status = DecodeWithOptionalCompression(
encoded, kBtreeNodeMagic, kBtreeNodeFormatVersion,
[&](riegeli::Reader& reader, uint32_t version) -> bool {
if (!reader.ReadByte(node.height)) return false;
DataFileTable data_file_table;
if (!ReadDataFileTable(reader, base_path, data_file_table)) {
return false;
}
uint32_t num_entries;
if (!ReadVarintChecked(reader, num_entries)) return false;
if (num_entries == 0) {
reader.Fail(absl::DataLossError("Empty b-tree node"));
return false;
}
if (num_entries > kMaxNodeArity) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"B-tree node has arity %d, which exceeds limit of %d",
num_entries, kMaxNodeArity)));
return false;
}
if (node.height == 0) {
return ReadBtreeNodeEntries<LeafNodeEntry>(reader, data_file_table,
num_entries, node);
} else {
return ReadBtreeNodeEntries<InteriorNodeEntry>(
reader, data_file_table, num_entries, node);
}
});
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(status,
"Error decoding b-tree node");
}
#ifndef NDEBUG
CheckBtreeNodeInvariants(node);
#endif
return node;
}
absl::Status ValidateBtreeNodeReference(const BtreeNode& node,
BtreeNodeHeight height,
std::string_view inclusive_min_key) {
if (node.height != height) {
return absl::DataLossError(absl::StrFormat(
"Expected height of %d but received: %d", height, node.height));
}
return std::visit(
[&](auto& entries) {
if (ComparePrefixedKeyToUnprefixedKey{node.key_prefix}(
entries.front().key, inclusive_min_key) < 0) {
return absl::DataLossError(
tensorstore::StrCat("First key ",
tensorstore::QuoteString(tensorstore::StrCat(
node.key_prefix, entries.front().key)),
" is less than inclusive_min ",
tensorstore::QuoteString(inclusive_min_key),
" specified by parent node"));
}
return absl::OkStatus();
},
node.entries);
}
bool operator==(const BtreeNodeStatistics& a, const BtreeNodeStatistics& b) {
return a.num_indirect_value_bytes == b.num_indirect_value_bytes &&
a.num_tree_bytes == b.num_tree_bytes && a.num_keys == b.num_keys;
}
std::ostream& operator<<(std::ostream& os, const BtreeNodeStatistics& x) {
return os << "{num_indirect_value_bytes=" << x.num_indirect_value_bytes
<< ", num_tree_bytes=" << x.num_tree_bytes
<< ", num_keys=" << x.num_keys << "}";
}
BtreeNodeStatistics& BtreeNodeStatistics::operator+=(
const BtreeNodeStatistics& other) {
num_indirect_value_bytes = internal::AddSaturate(
num_indirect_value_bytes, other.num_indirect_value_bytes);
num_tree_bytes = internal::AddSaturate(num_tree_bytes, other.num_tree_bytes);
num_keys = internal::AddSaturate(num_keys, other.num_keys);
return *this;
}
bool operator==(const LeafNodeEntry& a, const LeafNodeEntry& b) {
return a.key == b.key && a.value_reference == b.value_reference;
}
std::ostream& operator<<(std::ostream& os, const LeafNodeValueReference& x) {
if (auto* value = std::get_if<absl::Cord>(&x)) {
return os << tensorstore::QuoteString(std::string(*value));
} else {
return os << std::get<IndirectDataReference>(x);
}
}
std::ostream& operator<<(std::ostream& os, const LeafNodeEntry& e) {
return os << "{key=" << tensorstore::QuoteString(e.key)
<< ", value_reference=" << e.value_reference << "}";
}
bool operator==(const BtreeNodeReference& a, const BtreeNodeReference& b) {
return a.location == b.location && a.statistics == b.statistics;
}
std::ostream& operator<<(std::ostream& os, const BtreeNodeReference& x) {
return os << "{location=" << x.location << ", statistics=" << x.statistics
<< "}";
}
std::ostream& operator<<(std::ostream& os, const InteriorNodeEntry& e) {
return os << "{key=" << tensorstore::QuoteString(e.key)
<< ", subtree_common_prefix_length="
<< e.subtree_common_prefix_length << ", node=" << e.node << "}";
}
const LeafNodeEntry* FindBtreeEntry(span<const LeafNodeEntry> entries,
std::string_view key) {
const LeafNodeEntry* entry = FindBtreeEntryLowerBound(entries, key);
if (entry == entries.data() + entries.size() || entry->key != key) {
return nullptr;
}
return entry;
}
const LeafNodeEntry* FindBtreeEntryLowerBound(span<const LeafNodeEntry> entries,
std::string_view inclusive_min) {
return std::lower_bound(
entries.data(), entries.data() + entries.size(), inclusive_min,
[](const LeafNodeEntry& entry, std::string_view inclusive_min) {
return entry.key < inclusive_min;
});
}
span<const LeafNodeEntry> FindBtreeEntryRange(span<const LeafNodeEntry> entries,
std::string_view inclusive_min,
std::string_view exclusive_max) {
const LeafNodeEntry* lower = FindBtreeEntryLowerBound(entries, inclusive_min);
const LeafNodeEntry* upper = entries.data() + entries.size();
if (!exclusive_max.empty()) {
upper = std::lower_bound(
lower, upper, exclusive_max,
[](const LeafNodeEntry& entry, std::string_view exclusive_max) {
return entry.key < exclusive_max;
});
}
return {lower, upper};
}
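// Returns the interior entry whose subtree may contain `key`, i.e. the last
// entry with key <= `key`, or nullptr if `key` precedes the first entry.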
const InteriorNodeEntry* FindBtreeEntry(span<const InteriorNodeEntry> entries,
std::string_view key) {
auto it = std::lower_bound(
entries.data(), entries.data() + entries.size(), key,
[](const InteriorNodeEntry& entry, std::string_view inclusive_min) {
return entry.key <= inclusive_min;
});
if (it == entries.data()) {
return nullptr;
}
return it - 1;
}
const InteriorNodeEntry* FindBtreeEntryLowerBound(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min) {
auto it = std::lower_bound(
entries.data(), entries.data() + entries.size(), inclusive_min,
[](const InteriorNodeEntry& entry, std::string_view inclusive_min) {
return entry.key <= inclusive_min;
});
if (it != entries.data()) --it;
return it;
}
span<const InteriorNodeEntry> FindBtreeEntryRange(
span<const InteriorNodeEntry> entries, std::string_view inclusive_min,
std::string_view exclusive_max) {
const InteriorNodeEntry* lower =
FindBtreeEntryLowerBound(entries, inclusive_min);
const InteriorNodeEntry* upper = entries.data() + entries.size();
if (!exclusive_max.empty()) {
upper = std::lower_bound(
lower, upper, exclusive_max,
[](const InteriorNodeEntry& entry, std::string_view exclusive_max) {
return entry.key < exclusive_max;
});
}
return {lower, upper};
}
#ifndef NDEBUG
void CheckBtreeNodeInvariants(const BtreeNode& node) {
if (node.height == 0) {
assert(std::holds_alternative<BtreeNode::LeafNodeEntries>(node.entries));
auto& entries = std::get<BtreeNode::LeafNodeEntries>(node.entries);
assert(!entries.empty());
assert(entries.size() <= kMaxNodeArity);
for (size_t i = 0; i < entries.size(); ++i) {
auto& entry = entries[i];
if (auto* location =
std::get_if<IndirectDataReference>(&entry.value_reference)) {
assert(!location->IsMissing());
}
if (i != 0) {
assert(entry.key > entries[i - 1].key);
}
}
} else {
assert(
std::holds_alternative<BtreeNode::InteriorNodeEntries>(node.entries));
auto& entries = std::get<BtreeNode::InteriorNodeEntries>(node.entries);
assert(!entries.empty());
assert(entries.size() <= kMaxNodeArity);
for (size_t i = 0; i < entries.size(); ++i) {
auto& entry = entries[i];
assert(entry.subtree_common_prefix_length <= entry.key.size());
assert(!entry.node.location.IsMissing());
if (i != 0) {
assert(entry.key > entries[i - 1].key);
}
}
}
}
#endif
}
} | #include "tensorstore/kvstore/ocdbt/format/btree.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <string_view>
#include <type_traits>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/kvstore/ocdbt/format/btree_codec.h"
#include "tensorstore/kvstore/ocdbt/format/btree_node_encoder.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::BtreeNode;
using ::tensorstore::internal_ocdbt::BtreeNodeEncoder;
using ::tensorstore::internal_ocdbt::Config;
using ::tensorstore::internal_ocdbt::DecodeBtreeNode;
using ::tensorstore::internal_ocdbt::EncodedNode;
using ::tensorstore::internal_ocdbt::InteriorNodeEntry;
using ::tensorstore::internal_ocdbt::kMaxNodeArity;
using ::tensorstore::internal_ocdbt::LeafNodeEntry;
Result<std::vector<EncodedNode>> EncodeExistingNode(const Config& config,
const BtreeNode& node) {
return std::visit(
[&](const auto& entries) {
using Entry = typename std::decay_t<decltype(entries)>::value_type;
BtreeNodeEncoder<Entry> encoder(config, node.height,
node.key_prefix);
for (const auto& entry : entries) {
encoder.AddEntry(true, Entry(entry));
}
return encoder.Finalize(false);
},
node.entries);
}
void TestBtreeNodeRoundTrip(const Config& config, const BtreeNode& node) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
EncodeExistingNode(config, node));
ASSERT_EQ(1, encoded_nodes.size());
auto& encoded_node = encoded_nodes[0];
EXPECT_EQ(node.key_prefix, encoded_node.info.inclusive_min_key.substr(
0, encoded_node.info.excluded_prefix_length));
SCOPED_TRACE(tensorstore::StrCat(
"data=",
tensorstore::QuoteString(std::string(encoded_node.encoded_node))));
std::visit(
[&](const auto& entries) {
using Entry = typename std::decay_t<decltype(entries)>::value_type;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded_node,
DecodeBtreeNode(encoded_nodes[0].encoded_node, {}));
EXPECT_EQ(node.key_prefix,
tensorstore::StrCat(
encoded_node.info.inclusive_min_key.substr(
0, encoded_node.info.excluded_prefix_length),
decoded_node.key_prefix));
EXPECT_THAT(decoded_node.entries,
::testing::VariantWith<std::vector<Entry>>(entries));
},
node.entries);
}
TEST(BtreeNodeTest, LeafNodeRoundTrip) {
Config config;
config.compression = Config::NoCompression{};
BtreeNode node;
node.height = 0;
node.key_prefix = "ab";
auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
entries.push_back({"c",
absl::Cord("value1")});
entries.push_back({"d",
absl::Cord("value2")});
TestBtreeNodeRoundTrip(config, node);
}
TEST(BtreeNodeTest, InteriorNodeRoundTrip) {
Config config;
BtreeNode node;
node.height = 2;
auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
{
InteriorNodeEntry entry;
entry.key = "abc";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc";
entry.node.location.file_id.relative_path = "def";
entry.node.location.offset = 5;
entry.node.location.length = 6;
entry.node.statistics.num_indirect_value_bytes = 100;
entry.node.statistics.num_tree_bytes = 200;
entry.node.statistics.num_keys = 5;
entries.push_back(entry);
}
{
InteriorNodeEntry entry;
entry.key = "def";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc1";
entry.node.location.file_id.relative_path = "def1";
entry.node.location.offset = 42;
entry.node.location.length = 9;
entry.node.statistics.num_indirect_value_bytes = 101;
entry.node.statistics.num_tree_bytes = 220;
entry.node.statistics.num_keys = 8;
entries.push_back(entry);
}
TestBtreeNodeRoundTrip(config, node);
}
TEST(BtreeNodeTest, InteriorNodeBasePath) {
Config config;
BtreeNode node;
node.height = 2;
auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
{
InteriorNodeEntry entry;
entry.key = "abc";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc";
entry.node.location.file_id.relative_path = "def";
entry.node.location.offset = 5;
entry.node.location.length = 6;
entry.node.statistics.num_indirect_value_bytes = 100;
entry.node.statistics.num_tree_bytes = 200;
entry.node.statistics.num_keys = 5;
entries.push_back(entry);
}
{
InteriorNodeEntry entry;
entry.key = "def";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc1";
entry.node.location.file_id.relative_path = "def1";
entry.node.location.offset = 42;
entry.node.location.length = 9;
entry.node.statistics.num_indirect_value_bytes = 101;
entry.node.statistics.num_tree_bytes = 220;
entry.node.statistics.num_keys = 8;
entries.push_back(entry);
}
{
InteriorNodeEntry entry;
entry.key = "ghi";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc1";
entry.node.location.file_id.relative_path = "def2";
entry.node.location.offset = 43;
entry.node.location.length = 10;
entry.node.statistics.num_indirect_value_bytes = 102;
entry.node.statistics.num_tree_bytes = 230;
entry.node.statistics.num_keys = 9;
entries.push_back(entry);
}
{
InteriorNodeEntry entry;
entry.key = "jkl";
entry.subtree_common_prefix_length = 1;
entry.node.location.file_id.base_path = "abc1";
entry.node.location.file_id.relative_path = "def1";
entry.node.location.offset = 43;
entry.node.location.length = 10;
entry.node.statistics.num_indirect_value_bytes = 102;
entry.node.statistics.num_tree_bytes = 230;
entry.node.statistics.num_keys = 9;
entries.push_back(entry);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
EncodeExistingNode(config, node));
ASSERT_EQ(1, encoded_nodes.size());
auto& encoded_node = encoded_nodes[0];
EXPECT_EQ(node.key_prefix, encoded_node.info.inclusive_min_key.substr(
0, encoded_node.info.excluded_prefix_length));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded_node,
DecodeBtreeNode(encoded_nodes[0].encoded_node, "xyz/"));
entries[0].node.location.file_id.base_path = "xyz/abc";
entries[1].node.location.file_id.base_path = "xyz/abc1";
entries[2].node.location.file_id.base_path = "xyz/abc1";
entries[3].node.location.file_id.base_path = "xyz/abc1";
EXPECT_THAT(decoded_node.entries,
::testing::VariantWith<std::vector<InteriorNodeEntry>>(entries));
}
absl::Cord EncodeRawBtree(const std::vector<unsigned char>& data) {
using ::tensorstore::internal_ocdbt::kBtreeNodeFormatVersion;
using ::tensorstore::internal_ocdbt::kBtreeNodeMagic;
Config config;
config.compression = Config::NoCompression{};
return EncodeWithOptionalCompression(
config, kBtreeNodeMagic, kBtreeNodeFormatVersion,
[&](riegeli::Writer& writer) -> bool {
return writer.Write(std::string_view(
reinterpret_cast<const char*>(data.data()), data.size()));
})
.value();
}
absl::Status RoundTripRawBtree(const std::vector<unsigned char>& data) {
return DecodeBtreeNode(EncodeRawBtree(data), {}).status();
}
TEST(BtreeNodeTest, CorruptTruncateBodyZeroSize) {
EXPECT_THAT(
RoundTripRawBtree({}),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error decoding b-tree node: Unexpected end of data; .*"));
}
TEST(BtreeNodeTest, CorruptLeafTruncatedNumEntries) {
EXPECT_THAT(
RoundTripRawBtree({
0,
}),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error decoding b-tree node: Unexpected end of data; .*"));
}
TEST(BtreeNodeTest, CorruptLeafZeroNumEntries) {
EXPECT_THAT(
RoundTripRawBtree({
0,
0,
0,
0,
}),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error decoding b-tree node: Empty b-tree node; .*"));
}
TEST(BtreeNodeTest, CorruptInteriorZeroNumEntries) {
EXPECT_THAT(
RoundTripRawBtree({
1,
0,
0,
0,
}),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error decoding b-tree node: Empty b-tree node; .*"));
}
TEST(BtreeNodeTest, MaxArity) {
Config config;
config.compression = Config::NoCompression{};
config.max_decoded_node_bytes = 1000000000;
BtreeNode node;
node.height = 0;
auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
std::vector<std::string> keys;
for (size_t i = 0; i <= kMaxNodeArity; ++i) {
keys.push_back(absl::StrFormat("%07d", i));
}
std::sort(keys.begin(), keys.end());
const auto add_entry = [&](size_t i) {
    entries.push_back({/*.key=*/keys[i], /*.value_reference=*/absl::Cord()});
};
for (size_t i = 0; i < kMaxNodeArity; ++i) {
add_entry(i);
}
TestBtreeNodeRoundTrip(config, node);
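  // Adding one more entry pushes the node past kMaxNodeArity, so encoding is
  // expected to split it into two nodes (checked below).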
add_entry(kMaxNodeArity);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded_nodes,
EncodeExistingNode(config, node));
ASSERT_EQ(2, encoded_nodes.size());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded_node1,
DecodeBtreeNode(encoded_nodes[0].encoded_node, {}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded_node2,
DecodeBtreeNode(encoded_nodes[1].encoded_node, {}));
EXPECT_EQ(kMaxNodeArity / 2 + 1,
std::get<BtreeNode::LeafNodeEntries>(decoded_node1.entries).size());
EXPECT_EQ(kMaxNodeArity / 2,
std::get<BtreeNode::LeafNodeEntries>(decoded_node2.entries).size());
}
} |
603 | cpp | google/tensorstore | data_file_id_codec | tensorstore/kvstore/ocdbt/format/data_file_id_codec.cc | tensorstore/kvstore/ocdbt/format/data_file_id_codec_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_FORMAT_DATA_FILE_ID_CODEC_H_
#define TENSORSTORE_KVSTORE_OCDBT_FORMAT_DATA_FILE_ID_CODEC_H_
#include <stddef.h>
#include <stdint.h>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
namespace tensorstore {
namespace internal_ocdbt {
using DataFileIndexCodec = VarintCodec<uint64_t>;
class DataFileTableBuilder {
public:
void Add(const DataFileId& data_file_id);
[[nodiscard]] bool Finalize(riegeli::Writer& writer);
size_t GetIndex(const DataFileId& data_file_id) const;
void Clear();
private:
absl::flat_hash_map<DataFileId, size_t> data_files_;
};
struct DataFileTable {
std::vector<DataFileId> files;
};
[[nodiscard]] bool ReadDataFileTable(riegeli::Reader& reader,
const BasePath& transitive_path,
DataFileTable& value);
template <typename IO>
struct DataFileIdCodec;
template <>
struct DataFileIdCodec<riegeli::Reader> {
const DataFileTable& data_file_table;
[[nodiscard]] bool operator()(riegeli::Reader& reader,
DataFileId& value) const;
};
template <>
struct DataFileIdCodec<riegeli::Writer> {
const DataFileTableBuilder& table;
[[nodiscard]] bool operator()(riegeli::Writer& writer,
const DataFileId& value) const {
return DataFileIndexCodec{}(writer, table.GetIndex(value));
}
};
}
}
#endif
#include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/varint/varint_reading.h"
#include "riegeli/varint/varint_writing.h"
#include "tensorstore/internal/ref_counted_string.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
using PathLengthCodec = VarintCodec<PathLength>;
}
void DataFileTableBuilder::Add(const DataFileId& data_file_id) {
data_files_.emplace(data_file_id, 0);
}
bool DataFileTableBuilder::Finalize(riegeli::Writer& writer) {
if (!riegeli::WriteVarint64(data_files_.size(), writer)) return false;
if (data_files_.empty()) return true;
std::vector<DataFileId> sorted_data_files;
sorted_data_files.reserve(data_files_.size());
for (const auto& p : data_files_) {
sorted_data_files.push_back(p.first);
}
std::sort(sorted_data_files.begin(), sorted_data_files.end(),
[&](const DataFileId& a, const DataFileId& b) {
if (int c = std::string_view(a.base_path)
.compare(std::string_view(b.base_path));
c != 0) {
return c < 0;
}
return std::string_view(a.relative_path) <
std::string_view(b.relative_path);
});
std::vector<size_t> prefix_lengths(sorted_data_files.size());
prefix_lengths[0] = 0;
for (size_t i = 1; i < sorted_data_files.size(); ++i) {
auto& cur = sorted_data_files[i];
auto& prev = sorted_data_files[i - 1];
std::string_view prev_base_path = prev.base_path;
std::string_view cur_base_path = cur.base_path;
size_t prefix_length =
FindCommonPrefixLength(prev_base_path, cur_base_path);
if (prev_base_path.size() == cur_base_path.size() &&
cur_base_path.size() == prefix_length) {
prefix_length +=
FindCommonPrefixLength(prev.relative_path, cur.relative_path);
}
prefix_lengths[i] = prefix_length;
if (!PathLengthCodec{}(writer, prefix_length)) return false;
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
assert(data_file.base_path.size() + data_file.relative_path.size() <=
kMaxPathLength);
if (!PathLengthCodec{}(writer, data_file.base_path.size() +
data_file.relative_path.size() -
prefix_lengths[i])) {
return false;
}
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
if (!PathLengthCodec{}(writer, data_file.base_path.size())) {
return false;
}
}
for (size_t i = 0; i < sorted_data_files.size(); ++i) {
const auto& data_file = sorted_data_files[i];
size_t prefix_length = prefix_lengths[i];
std::string_view base_path = data_file.base_path;
size_t base_path_prefix_length = std::min(prefix_length, base_path.size());
if (!writer.Write(base_path.substr(base_path_prefix_length))) return false;
std::string_view relative_path = data_file.relative_path;
if (!writer.Write(
relative_path.substr(prefix_length - base_path_prefix_length))) {
return false;
}
auto it = data_files_.find(data_file);
assert(it != data_files_.end());
it->second = i;
}
return true;
}
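// Encoded layout produced by Finalize (as implemented above), after sorting
// the entries by (base_path, relative_path):
//   varint64: number of files n
//   for i in [1, n): PathLength varint prefix_length[i] -- bytes shared with
//       entry i-1 (base_path first; relative_path bytes only count when the
//       two base paths are identical)
//   for i in [0, n): PathLength varint suffix_length[i] -- total path length
//       (base_path + relative_path) minus prefix_length[i]
//   for i in [0, n): PathLength varint base_path_length[i]
//   for i in [0, n): the suffix bytes: remaining base_path bytes followed by
//       remaining relative_path bytes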
size_t DataFileTableBuilder::GetIndex(const DataFileId& data_file_id) const {
return data_files_.at(data_file_id);
}
void DataFileTableBuilder::Clear() { data_files_.clear(); }
[[nodiscard]] bool ReadDataFileTable(riegeli::Reader& reader,
const BasePath& transitive_path,
DataFileTable& value) {
ABSL_CHECK_LE(transitive_path.size(), kMaxPathLength);
std::string_view transitive_path_sv = transitive_path;
const size_t max_path_length = kMaxPathLength - transitive_path_sv.size();
uint64_t num_files;
if (!riegeli::ReadVarint64(reader, num_files)) return false;
std::vector<PathLength> path_length_buffer;
constexpr uint64_t kMaxReserve = 1024;
path_length_buffer.reserve(std::min(kMaxReserve, num_files) * 3);
path_length_buffer.push_back(0);
for (uint64_t i = 1; i < num_files; ++i) {
PathLength prefix_length;
if (!PathLengthCodec{}(reader, prefix_length)) return false;
path_length_buffer.push_back(prefix_length);
}
for (uint64_t i = 0; i < num_files; ++i) {
PathLength suffix_length;
if (!PathLengthCodec{}(reader, suffix_length)) return false;
path_length_buffer.push_back(suffix_length);
}
PathLength prev_base_path_length = 0;
for (uint64_t i = 0; i < num_files; ++i) {
PathLength base_path_length;
if (!PathLengthCodec{}(reader, base_path_length)) return false;
size_t prefix_length = path_length_buffer[i];
size_t suffix_length = path_length_buffer[num_files + i];
size_t path_length = prefix_length + suffix_length;
if (path_length > max_path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"path_length[%d] = prefix_length(%d) + "
"suffix_length(%d) = %d > %d - transitive_length(%d) = %d",
i, prefix_length, suffix_length, path_length, kMaxPathLength,
transitive_path.size(), max_path_length)));
return false;
}
if (base_path_length > path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"base_path_length[%d] = %d > path_length[%d] = %d = "
"prefix_length(%d) + suffix_length(%d)",
i, base_path_length, i, path_length, prefix_length, suffix_length)));
return false;
}
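    // A common prefix longer than the shorter of the two base paths is only
    // well-defined when both entries have the same base path length; anything
    // else indicates a corrupt table.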
if (prefix_length > std::min(prev_base_path_length, base_path_length) &&
base_path_length != prev_base_path_length) {
reader.Fail(absl::DataLossError(absl::StrFormat(
"path_prefix_length[%d] = %d > "
"min(base_path_length[%d] = %d, base_path_length[%d] = %d) is not "
"valid if "
"base_path_length[%d] != base_path_length[%d]",
i - 1, prefix_length,
i - 1, prev_base_path_length,
i, base_path_length,
i - 1, i)));
return false;
}
path_length_buffer.push_back(base_path_length);
prev_base_path_length = base_path_length;
}
auto& files = value.files;
files.resize(num_files);
size_t prev_relative_path_length = 0;
for (uint64_t i = 0; i < num_files; ++i) {
size_t prefix_length = path_length_buffer[i];
size_t suffix_length = path_length_buffer[num_files + i];
size_t base_path_length = path_length_buffer[2 * num_files + i];
size_t relative_path_length =
prefix_length + suffix_length - base_path_length;
if (!reader.Pull(suffix_length)) return false;
auto& file = files[i];
if (base_path_length == 0) {
file.base_path = transitive_path;
} else if (prefix_length >= base_path_length) {
assert(files[i - 1].base_path.size() ==
base_path_length + transitive_path.size());
file.base_path = files[i - 1].base_path;
prefix_length -= base_path_length;
} else {
internal::RefCountedStringWriter writer(base_path_length +
transitive_path_sv.size());
std::memcpy(writer.data(), transitive_path_sv.data(),
transitive_path_sv.size());
size_t offset = transitive_path_sv.size();
size_t base_suffix_length = base_path_length > prefix_length
? base_path_length - prefix_length
: 0;
if (prefix_length > 0) {
std::string_view prev_base_path = files[i - 1].base_path;
prev_base_path.remove_prefix(transitive_path_sv.size());
size_t base_prefix_length = std::min(prefix_length, base_path_length);
assert(base_prefix_length <= prev_base_path.size());
std::memcpy(writer.data() + offset, prev_base_path.data(),
base_prefix_length);
offset += base_prefix_length;
prefix_length -= base_prefix_length;
}
if (base_suffix_length) {
std::memcpy(writer.data() + offset, reader.cursor(),
base_suffix_length);
reader.move_cursor(base_suffix_length);
suffix_length -= base_suffix_length;
}
file.base_path = std::move(writer);
}
if (relative_path_length == 0) {
assert(suffix_length == 0);
prev_relative_path_length = 0;
continue;
}
if (suffix_length == 0 &&
relative_path_length == prev_relative_path_length) {
assert(file.base_path == files[i - 1].base_path);
file.relative_path = files[i - 1].relative_path;
continue;
}
internal::RefCountedStringWriter writer(relative_path_length);
size_t offset = 0;
if (prefix_length) {
assert(file.base_path == files[i - 1].base_path);
assert(prefix_length <= relative_path_length);
std::memcpy(writer.data(), files[i - 1].relative_path.data(),
prefix_length);
offset += prefix_length;
}
if (suffix_length > 0) {
assert(offset + suffix_length == relative_path_length);
std::memcpy(writer.data() + offset, reader.cursor(), suffix_length);
reader.move_cursor(suffix_length);
}
file.relative_path = std::move(writer);
prev_relative_path_length = relative_path_length;
}
return true;
}
[[nodiscard]] bool DataFileIdCodec<riegeli::Reader>::operator()(
riegeli::Reader& reader, DataFileId& value) const {
uint64_t index;
if (!DataFileIndexCodec{}(reader, index)) return false;
if (index >= data_file_table.files.size()) {
reader.Fail(absl::DataLossError(
absl::StrFormat("Data file id %d is outside range [0, %d)", index,
data_file_table.files.size())));
return false;
}
value = data_file_table.files[index];
return true;
}
}
} | #include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/varint/varint_writing.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::BasePath;
using ::tensorstore::internal_ocdbt::DataFileId;
using ::tensorstore::internal_ocdbt::DataFileTable;
using ::tensorstore::internal_ocdbt::DataFileTableBuilder;
using ::tensorstore::internal_ocdbt::FinalizeReader;
using ::tensorstore::internal_ocdbt::FinalizeWriter;
using ::tensorstore::internal_ocdbt::kMaxPathLength;
using ::tensorstore::internal_ocdbt::ReadDataFileTable;
Result<absl::Cord> Encode(const DataFileTable& table) {
DataFileTableBuilder builder;
for (const auto& file : table.files) {
builder.Add(file);
}
absl::Cord cord;
{
riegeli::CordWriter writer{&cord};
bool success = builder.Finalize(writer);
TENSORSTORE_RETURN_IF_ERROR(FinalizeWriter(writer, success));
}
return cord;
}
Result<DataFileTable> Decode(const absl::Cord& cord,
const BasePath& base_path = {}) {
DataFileTable new_table;
{
riegeli::CordReader reader{&cord};
bool success = ReadDataFileTable(reader, base_path, new_table);
TENSORSTORE_RETURN_IF_ERROR(FinalizeReader(reader, success));
}
return new_table;
}
Result<DataFileTable> RoundTrip(const DataFileTable& table,
const BasePath& base_path = {}) {
TENSORSTORE_ASSIGN_OR_RETURN(auto cord, Encode(table));
return Decode(cord, base_path);
}
TEST(DataFileBuilderTest, Empty) {
DataFileTable table;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
EXPECT_EQ(1, encoded.size());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, Decode(encoded));
EXPECT_EQ(table.files, new_table.files);
}
TEST(DataFileBuilderTest, Simple) {
DataFileTable table;
table.files = {
{"b", "d"}, {"a", "c"}, {"a", "b"}, {"b", "e"}, {"b", "d"},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, ""));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray({
DataFileId{"a", "b"},
DataFileId{"a", "c"},
DataFileId{"b", "d"},
DataFileId{"b", "e"},
}));
}
TEST(DataFileBuilderTest, Prefixes) {
DataFileTable table;
table.files = {
{"", ""},
{"", "a"},
{"", "ab"},
{"", "ac"},
{"a", ""},
{"a", "x"},
{"a", "xy"},
{"a", "xyz"},
{"ab", ""},
{"ab", "xy"},
{"ab", "xyz"},
{"ac", "xy"},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, ""));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray(table.files));
}
TEST(DataFileBuilderTest, AddBasePath) {
DataFileTable table;
table.files = {
{"b", "d"}, {"a", "c"}, {"a", "b"}, {"b", "e"}, {"b", "d"}, {"", "y"},
};
BasePath base_path = "x/";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_table, RoundTrip(table, base_path));
ASSERT_THAT(new_table.files, ::testing::ElementsAreArray({
DataFileId{"x/", "y"},
DataFileId{"x/a", "b"},
DataFileId{"x/a", "c"},
DataFileId{"x/b", "d"},
DataFileId{"x/b", "e"},
}));
EXPECT_EQ(base_path.data(), new_table.files[0].base_path.data());
EXPECT_EQ(new_table.files[1].base_path.data(),
new_table.files[2].base_path.data());
EXPECT_EQ(new_table.files[3].base_path.data(),
new_table.files[4].base_path.data());
}
TEST(DataFileBuilderTest, Truncated) {
DataFileTable table;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
ASSERT_EQ(1, encoded.size());
EXPECT_THAT(Decode(encoded.Subcord(0, 0)),
MatchesStatus(absl::StatusCode::kDataLoss));
}
TEST(DataFileBuilderTest, BasePathTooLongWithPrefix) {
DataFileTable table;
DataFileId long_id{std::string_view(std::string(kMaxPathLength, 'x'))};
table.files = {long_id};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, Encode(table));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, Decode(encoded));
ASSERT_EQ(table.files, decoded.files);
EXPECT_THAT(Decode(encoded, "z"),
MatchesStatus(absl::StatusCode::kDataLoss, "path_length.*"));
}
TEST(DataFileBuilderTest, SuffixLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(kMaxPathLength + 1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, BasePathLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(5, writer));
ASSERT_TRUE(riegeli::WriteVarint64(65536, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, PrefixLengthTooLong) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(kMaxPathLength + 1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid 16-bit varint value.*"));
}
TEST(DataFileBuilderTest, BasePathLongerThanPath) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(riegeli::WriteVarint64(5, writer));
ASSERT_TRUE(riegeli::WriteVarint64(6, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded),
MatchesStatus(absl::StatusCode::kDataLoss, "base_path_length.*"));
}
TEST(DataFileBuilderTest, PrefixLengthLongerThanPrevBasePath) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(2, writer));
ASSERT_TRUE(riegeli::WriteVarint64(0, writer));
ASSERT_TRUE(riegeli::WriteVarint64(0, writer));
ASSERT_TRUE(riegeli::WriteVarint64(1, writer));
ASSERT_TRUE(writer.Close());
EXPECT_THAT(Decode(encoded), MatchesStatus(absl::StatusCode::kDataLoss,
"path_prefix_length.*"));
}
} |
604 | cpp | google/tensorstore | dump | tensorstore/kvstore/ocdbt/format/dump.cc | tensorstore/kvstore/ocdbt/format/dump_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_FORMAT_DUMP_H_
#define TENSORSTORE_KVSTORE_OCDBT_FORMAT_DUMP_H_
#include <string>
#include <string_view>
#include <nlohmann/json.hpp>
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
struct LabeledIndirectDataReference {
IndirectDataKind kind;
IndirectDataReference location;
static Result<LabeledIndirectDataReference> Parse(std::string_view s);
};
::nlohmann::json Dump(const Manifest& manifest);
::nlohmann::json Dump(const BtreeNode& node);
::nlohmann::json Dump(const VersionTreeNode& node);
}
}
#endif
#include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <map>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include <nlohmann/json.hpp>
#include "re2/re2.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_variant.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
Result<LabeledIndirectDataReference> LabeledIndirectDataReference::Parse(
std::string_view s) {
LabeledIndirectDataReference r;
static LazyRE2 kPattern = {"([^:]+):([^:]*):([^:]*):([0-9]+):([0-9]+)"};
std::string_view label, encoded_base_path, encoded_relative_path;
if (!RE2::FullMatch(s, *kPattern, &label, &encoded_base_path,
&encoded_relative_path, &r.location.offset,
&r.location.length)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid indirect data reference: ", tensorstore::QuoteString(s)));
}
TENSORSTORE_ASSIGN_OR_RETURN(r.kind, ParseIndirectDataKind(label));
r.location.file_id.base_path = internal::PercentDecode(encoded_base_path);
r.location.file_id.relative_path =
internal::PercentDecode(encoded_relative_path);
TENSORSTORE_RETURN_IF_ERROR(r.location.Validate(false));
return r;
}
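// The textual form parsed above is
// "<kind>:<base_path>:<relative_path>:<offset>:<length>", with the two path
// components percent-encoded.  For example (see the unit tests accompanying
// this file), "btreenode:abc:def%20:1:36" parses to kind=kBtreeNode,
// file_id={"abc", "def "}, offset=1, length=36.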
namespace {
namespace jb = tensorstore::internal_json_binding;
constexpr auto ConfigBinder = jb::Compose<ConfigConstraints>(
[](auto is_loading, const auto& options, auto* obj, auto* constraints) {
if constexpr (is_loading) {
CreateConfig(constraints, *obj);
if (ConfigConstraints(*obj) != *constraints) {
return absl::InvalidArgumentError("Config is not fully specified");
}
} else {
*constraints = ConfigConstraints(*obj);
}
return absl::OkStatus();
});
static inline constexpr internal::AsciiSet
kLabeledIndirectDataReferenceUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_./"};
constexpr auto LabeledIndirectDataReferenceBinder =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
if (auto* s = j->template get_ptr<const std::string*>()) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj,
LabeledIndirectDataReference::Parse(*s));
} else {
return internal_json::ExpectedError(*j, "string");
}
} else {
if (obj->location.IsMissing()) {
*j = ::nlohmann::json::value_t::discarded;
} else {
*j = tensorstore::StrCat(
IndirectDataKindToString(obj->kind), ":",
internal::PercentEncodeReserved(
obj->location.file_id.base_path,
kLabeledIndirectDataReferenceUnreservedChars),
":",
internal::PercentEncodeReserved(
obj->location.file_id.relative_path,
kLabeledIndirectDataReferenceUnreservedChars),
":", obj->location.offset, ":", obj->location.length);
}
}
return absl::OkStatus();
};
constexpr auto IndirectDataReferenceBinder(IndirectDataKind kind) {
return jb::Compose<LabeledIndirectDataReference>(
[kind](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
*obj = j->location;
} else {
j->location = *obj;
j->kind = kind;
}
return absl::OkStatus();
},
LabeledIndirectDataReferenceBinder);
}
constexpr auto CommitTimeBinder = jb::Projection<&CommitTime::value>();
constexpr auto BtreeNodeStatisticsBinder = jb::Object(
jb::Member(
"num_indirect_value_bytes",
jb::Projection<&BtreeNodeStatistics::num_indirect_value_bytes>()),
jb::Member("num_tree_bytes",
jb::Projection<&BtreeNodeStatistics::num_tree_bytes>()),
jb::Member("num_keys", jb::Projection<&BtreeNodeStatistics::num_keys>()));
constexpr auto BtreeNodeReferenceBinder = jb::Object(
jb::Member("location",
jb::Projection<&BtreeNodeReference::location>(
IndirectDataReferenceBinder(IndirectDataKind::kBtreeNode))),
jb::Member("statistics", jb::Projection<&BtreeNodeReference::statistics>(
BtreeNodeStatisticsBinder)));
constexpr auto BtreeGenerationReferenceBinder = jb::Object(
jb::Member("root", jb::Projection<&BtreeGenerationReference::root>(
BtreeNodeReferenceBinder)),
jb::Member("generation_number",
jb::Projection<&BtreeGenerationReference::generation_number>()),
jb::Member("root_height",
jb::Projection<&BtreeGenerationReference::root_height>()),
jb::Member("commit_time",
jb::Projection<&BtreeGenerationReference::commit_time>(
CommitTimeBinder)));
constexpr auto VersionNodeReferenceBinder = jb::Object(
jb::Member("location", jb::Projection<&VersionNodeReference::location>(
IndirectDataReferenceBinder(
IndirectDataKind::kVersionNode))),
jb::Member("generation_number",
jb::Projection<&VersionNodeReference::generation_number>()),
jb::Member("height", jb::Projection<&VersionNodeReference::height>()),
jb::Member("num_generations",
jb::Projection<&VersionNodeReference::num_generations>()),
jb::Member(
"commit_time",
jb::Projection<&VersionNodeReference::commit_time>(CommitTimeBinder)));
constexpr auto ManifestBinder = jb::Object(
jb::Member("config", jb::Projection<&Manifest::config>(ConfigBinder)),
jb::Member("versions", jb::Projection<&Manifest::versions>(
jb::Array(BtreeGenerationReferenceBinder))),
jb::Member("version_tree_nodes",
jb::Projection<&Manifest::version_tree_nodes>(
jb::Array(VersionNodeReferenceBinder))));
constexpr auto BinaryCordBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) {
if constexpr (is_loading) {
if (auto* b = j->template get_ptr<const ::nlohmann::json::binary_t*>()) {
*obj = absl::Cord(std::string_view(
reinterpret_cast<const char*>(b->data()), b->size()));
return absl::OkStatus();
} else if (auto* s = j->template get_ptr<const std::string*>()) {
*obj = absl::Cord(*s);
return absl::OkStatus();
} else {
return internal_json::ExpectedError(*j, "string or byte string");
}
} else {
::nlohmann::json::binary_t v;
v.reserve(obj->size());
for (std::string_view chunk : obj->Chunks()) {
v.insert(v.end(), chunk.begin(), chunk.end());
}
*j = std::move(v);
return absl::OkStatus();
}
};
constexpr auto LeafNodeValueReferenceBinder = jb::Variant(
jb::Member("inline_value", BinaryCordBinder),
jb::Member("indirect_value",
IndirectDataReferenceBinder(IndirectDataKind::kValue)));
constexpr auto BtreeLeafNodeEntryBinder(std::string_view key_prefix) {
return
[=](std::false_type is_loading, const auto& options, auto* obj, auto* j) {
::nlohmann::json::binary_t key;
key.insert(key.end(), key_prefix.begin(), key_prefix.end());
key.insert(key.end(), obj->key.begin(), obj->key.end());
::nlohmann::json::object_t x{{"key", key}};
TENSORSTORE_RETURN_IF_ERROR(LeafNodeValueReferenceBinder(
std::false_type{}, IncludeDefaults{}, &obj->value_reference, &x));
*j = std::move(x);
return absl::OkStatus();
};
}
constexpr auto BtreeInteriorNodeEntryBinder(std::string_view key_prefix) {
return [=](std::false_type is_loading, const auto& options, auto* obj,
auto* j) {
::nlohmann::json::binary_t key;
key.insert(key.end(), key_prefix.begin(), key_prefix.end());
key.insert(key.end(), obj->key.begin(), obj->key.end());
auto common_prefix = key;
common_prefix.resize(obj->subtree_common_prefix_length + key_prefix.size());
::nlohmann::json::object_t x;
TENSORSTORE_RETURN_IF_ERROR(BtreeNodeReferenceBinder(
std::false_type{}, IncludeDefaults{}, &obj->node, &x));
x["key"] = key;
x["subtree_common_prefix"] = common_prefix;
*j = std::move(x);
return absl::OkStatus();
};
}
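// Note: both entry binders above take `std::false_type is_loading`, i.e. they
// support only the save (dump-to-JSON) direction; these dump helpers are not
// meant to reconstruct nodes from JSON.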
constexpr auto BtreeNodeBinder = jb::Object(
jb::Member("height", jb::Projection<&BtreeNode::height>()),
jb::Member("entries",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Variant(
jb::Array(BtreeLeafNodeEntryBinder(obj->key_prefix)),
jb::Array(BtreeInteriorNodeEntryBinder(obj->key_prefix)))(
is_loading, options, &obj->entries, j);
}));
constexpr auto VersionTreeNodeBinder = jb::Object(
jb::Member("height", jb::Projection<&VersionTreeNode::height>()),
jb::Member("version_tree_arity_log2",
jb::Projection<&VersionTreeNode::version_tree_arity_log2>()),
jb::Member("entries", jb::Projection<&VersionTreeNode::entries>(jb::Variant(
jb::Array(BtreeGenerationReferenceBinder),
jb::Array(VersionNodeReferenceBinder)))));
}
::nlohmann::json Dump(const Manifest& manifest) {
return jb::ToJson(manifest, ManifestBinder).value();
}
::nlohmann::json Dump(const BtreeNode& node) {
return jb::ToJson(node, BtreeNodeBinder).value();
}
::nlohmann::json Dump(const VersionTreeNode& node) {
return jb::ToJson(node, VersionTreeNodeBinder).value();
}
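// For example (see the DumpTest cases in the accompanying unit test),
// Dump(manifest) produces JSON of the form:
//   {"config": {...},
//    "versions": [{"commit_time": ..., "generation_number": ...,
//                  "root": {"location":
//                               "btreenode:<base>:<relative>:<offset>:<length>",
//                           "statistics": {...}},
//                  "root_height": ...}],
//    "version_tree_nodes": [...]}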
}
} | #include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <stdint.h>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_ocdbt::BtreeNode;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::DataFileId;
using ::tensorstore::internal_ocdbt::Dump;
using ::tensorstore::internal_ocdbt::IndirectDataKind;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::LabeledIndirectDataReference;
using ::tensorstore::internal_ocdbt::Manifest;
TEST(LabeledIndirectDataReferenceTest, ParseBtreeNode) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto value,
LabeledIndirectDataReference::Parse("btreenode:abc:def%20:1:36"));
EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
EXPECT_EQ(1, value.location.offset);
EXPECT_EQ(36, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, ParseValue) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto value, LabeledIndirectDataReference::Parse("value:abc:def%20:1:36"));
EXPECT_EQ(IndirectDataKind::kValue, value.kind);
EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
EXPECT_EQ(1, value.location.offset);
EXPECT_EQ(36, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, ParseVersionNode) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto value,
LabeledIndirectDataReference::Parse("versionnode:abc:def%20:1:36"));
EXPECT_EQ(IndirectDataKind::kVersionNode, value.kind);
EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
EXPECT_EQ(1, value.location.offset);
EXPECT_EQ(36, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, MaxOffset) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto value, LabeledIndirectDataReference::Parse(
"btreenode:abc:def%20:9223372036854775807:0"));
EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
EXPECT_EQ(9223372036854775807, value.location.offset);
EXPECT_EQ(0, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, MaxOffsetAndLength) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto value, LabeledIndirectDataReference::Parse(
"btreenode:abc:def%20:9223372036854775806:1"));
EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
EXPECT_EQ(9223372036854775806, value.location.offset);
EXPECT_EQ(1, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, OffsetTooLarge) {
EXPECT_THAT(
LabeledIndirectDataReference::Parse(
"btreenode:abc:def%20:9223372036854775808:0"),
MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
TEST(LabeledIndirectDataReferenceTest, InvalidKind) {
EXPECT_THAT(LabeledIndirectDataReference::Parse("abc:abc:def:0:10"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid indirect data kind: abc"));
}
TEST(LabeledIndirectDataReferenceTest, LengthTooLarge) {
EXPECT_THAT(
LabeledIndirectDataReference::Parse(
"btreenode:abc:def%20:9223372036854775807:1"),
MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
TEST(DumpTest, Manifest) {
Manifest manifest;
manifest.config.uuid = {
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}};
manifest.config.version_tree_arity_log2 = 1;
{
auto& x = manifest.versions.emplace_back();
x.root.location.file_id = {"abc", "def"};
x.root.location.offset = 10;
x.root.location.length = 42;
x.generation_number = 15;
x.root.statistics.num_indirect_value_bytes = 101;
x.root.statistics.num_tree_bytes = 220;
x.root.statistics.num_keys = 8;
x.root_height = 0;
x.commit_time = CommitTime{10};
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 8;
x.height = 3;
x.commit_time = CommitTime{1};
x.num_generations = 8;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 12;
x.height = 2;
x.commit_time = CommitTime{5};
x.num_generations = 4;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id = {"abc", "def"};
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 14;
x.height = 1;
x.commit_time = CommitTime{8};
x.num_generations = 2;
}
EXPECT_THAT(Dump(manifest),
MatchesJson({
{"config",
{{"uuid", "000102030405060708090a0b0c0d0e0f"},
{"compression", {{"id", "zstd"}}},
{"max_decoded_node_bytes", 8388608},
{"max_inline_value_bytes", 100},
{"version_tree_arity_log2", 1}}},
{"version_tree_nodes",
{{
{"commit_time", 1},
{"generation_number", 8},
{"height", 3},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 8},
},
{
{"commit_time", 5},
{"generation_number", 12},
{"height", 2},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 4},
},
{
{"commit_time", 8},
{"generation_number", 14},
{"height", 1},
{"location", "versionnode:abc:def:10:42"},
{"num_generations", 2},
}}},
{"versions",
{{{"commit_time", 10},
{"root",
{{"location", "btreenode:abc:def:10:42"},
{"statistics",
{{"num_indirect_value_bytes", 101},
{"num_keys", 8},
{"num_tree_bytes", 220}}}}},
{"generation_number", 15},
{"root_height", 0}}}},
}));
}
TEST(DumpTest, BtreeLeafNode) {
BtreeNode node;
node.height = 0;
node.key_prefix = "ab";
auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
entries.push_back({"c",
absl::Cord("value1")});
entries.push_back({"d",
absl::Cord("value2")});
entries.push_back({"e",
IndirectDataReference{{"abc", "def"}, 1, 25}});
EXPECT_THAT(
Dump(node),
MatchesJson({
{"entries",
{
{
{"inline_value",
::nlohmann::json::binary_t{
std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '1'}}},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'c'}}},
},
{
{"inline_value",
::nlohmann::json::binary_t{
std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '2'}}},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'd'}}},
},
{
{"indirect_value", "value:abc:def:1:25"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'e'}}},
},
}},
{"height", 0},
}));
}
TEST(DumpTest, BtreeInteriorNode) {
BtreeNode node;
node.height = 2;
auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
entries.push_back({"abc",
1,
{
{
{"abc", "def"},
5,
6,
},
{
100,
200,
5,
},
}});
entries.push_back({"def",
1,
{
{
{"ghi", "jkl"},
42,
9,
},
{
101,
220,
8,
},
}});
EXPECT_THAT(
Dump(node),
MatchesJson({
{"entries",
{
{{"location", "btreenode:abc:def:5:6"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'a', 'b', 'c'}}},
{"subtree_common_prefix",
::nlohmann::json::binary_t{std::vector<uint8_t>{'a'}}},
{
"statistics",
{{"num_indirect_value_bytes", 100},
{"num_keys", 5},
{"num_tree_bytes", 200}},
}},
{
{"location", "btreenode:ghi:jkl:42:9"},
{"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
'd', 'e', 'f'}}},
{"subtree_common_prefix",
::nlohmann::json::binary_t{std::vector<uint8_t>{'d'}}},
{"statistics",
{{"num_indirect_value_bytes", 101},
{"num_keys", 8},
{"num_tree_bytes", 220}}},
},
}},
{"height", 2},
}));
}
} |
605 | cpp | google/tensorstore | manifest | tensorstore/kvstore/ocdbt/format/manifest.cc | tensorstore/kvstore/ocdbt/format/manifest_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_FORMAT_MANIFEST_H_
#define TENSORSTORE_KVSTORE_OCDBT_FORMAT_MANIFEST_H_
#include <iosfwd>
#include <memory>
#include <string>
#include <string_view>
#include "absl/functional/function_ref.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
std::string GetManifestPath(std::string_view base_path);
std::string GetNumberedManifestPath(std::string_view base_path,
GenerationNumber generation_number);
constexpr GenerationNumber kNumNumberedManifestsToKeep = 128;
struct Manifest {
Config config;
VersionTreeNode::LeafNodeEntries versions;
VersionTreeNode::InteriorNodeEntries version_tree_nodes;
const BtreeGenerationReference& latest_version() const {
return versions.back();
}
GenerationNumber latest_generation() const {
return latest_version().generation_number;
}
friend bool operator==(const Manifest& a, const Manifest& b);
friend bool operator!=(const Manifest& a, const Manifest& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, const Manifest& e);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.config, x.versions, x.version_tree_nodes);
};
};
inline GenerationNumber GetLatestGeneration(const Manifest* manifest) {
return manifest ? manifest->latest_generation() : 0;
}
struct ManifestWithTime {
std::shared_ptr<const Manifest> manifest;
absl::Time time;
};
Result<Manifest> DecodeManifest(const absl::Cord& encoded);
Result<absl::Cord> EncodeManifest(const Manifest& manifest,
bool encode_as_single = false);
void ForEachManifestVersionTreeNodeRef(
GenerationNumber generation_number, uint8_t version_tree_arity_log2,
absl::FunctionRef<void(GenerationNumber min_generation_number,
GenerationNumber max_generation_number,
VersionTreeHeight height)>
callback);
#ifndef NDEBUG
void CheckManifestInvariants(const Manifest& manifest,
bool assume_single = false);
#endif
}
}
#endif
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <ostream>
#include <string>
#include <string_view>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/ocdbt/format/codec_util.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/config_codec.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id_codec.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree_codec.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
constexpr uint32_t kManifestMagic = 0x0cdb3a2a;
constexpr uint8_t kManifestFormatVersion = 0;
void ForEachManifestVersionTreeNodeRef(
GenerationNumber generation_number, uint8_t version_tree_arity_log2,
absl::FunctionRef<void(GenerationNumber min_generation_number,
GenerationNumber max_generation_number,
VersionTreeHeight height)>
callback) {
generation_number = (generation_number - 1) >> version_tree_arity_log2
<< version_tree_arity_log2;
VersionTreeHeight height = 1;
while (generation_number) {
GenerationNumber next_generation_number =
(generation_number - 1)
>> (height + 1) * version_tree_arity_log2
<< (height + 1) * version_tree_arity_log2;
GenerationNumber min_generation_number = next_generation_number + 1;
callback(min_generation_number, generation_number, height);
++height;
generation_number = next_generation_number;
}
}
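// Worked example (matching the unit test accompanying this file): for
// generation_number=30 and version_tree_arity_log2=2, the callback is invoked
// with (min=17, max=28, height=1) and then (min=1, max=16, height=2).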
absl::Status ValidateManifestVersionTreeNodes(
VersionTreeArityLog2 version_tree_arity_log2,
GenerationNumber last_generation_number,
const std::vector<VersionNodeReference>& entries) {
const auto max_height = GetMaxVersionTreeHeight(version_tree_arity_log2);
for (size_t i = 0; i < entries.size(); ++i) {
auto& entry = entries[i];
if (entry.height == 0 || entry.height > max_height) {
return absl::DataLossError(absl::StrFormat(
"entry_height[%d] outside valid range [1, %d]", i, max_height));
}
if (entry.generation_number == 0) {
return absl::DataLossError(
absl::StrFormat("generation_number[%d] must be non-zero", i));
}
if (i > 0) {
if (entry.generation_number <= entries[i - 1].generation_number) {
return absl::DataLossError(absl::StrFormat(
"generation_number[%d]=%d <= generation_number[%d]=%d", i,
entry.generation_number, i - 1, entries[i - 1].generation_number));
}
if (entry.height >= entries[i - 1].height) {
return absl::DataLossError(
absl::StrFormat("entry_height[%d]=%d >= entry_height[%d]=%d", i,
entry.height, i - 1, entries[i - 1].height));
}
}
}
size_t i = entries.size();
absl::Status status;
ForEachManifestVersionTreeNodeRef(
last_generation_number, version_tree_arity_log2,
[&](GenerationNumber min_generation_number,
GenerationNumber max_generation_number, VersionTreeHeight height) {
if (!status.ok()) {
return;
}
if (i == 0) {
return;
}
auto& entry = entries[i - 1];
if (entry.height != height) {
return;
}
--i;
if (entry.generation_number < min_generation_number ||
entry.generation_number > max_generation_number) {
status = absl::DataLossError(
absl::StrFormat("generation_number[%d]=%d is outside expected "
"range [%d, %d] for height %d",
i, entry.generation_number, min_generation_number,
max_generation_number, entry.height));
}
});
if (!status.ok()) return status;
if (i != 0) {
return absl::DataLossError(
absl::StrFormat("Unexpected child with generation_number[%d]=%d and "
"entry_height[%d]=%d given last generation_number=%d",
i - 1, entries[i - 1].generation_number, i - 1,
entries[i - 1].height, last_generation_number));
}
return absl::OkStatus();
}
bool ReadManifestVersionTreeNodes(
riegeli::Reader& reader, VersionTreeArityLog2 version_tree_arity_log2,
const DataFileTable& data_file_table,
std::vector<VersionNodeReference>& version_tree_nodes,
GenerationNumber last_generation_number) {
const size_t max_num_entries =
GetMaxVersionTreeHeight(version_tree_arity_log2);
if (!VersionTreeInteriorNodeEntryArrayCodec<DataFileTable>{
data_file_table, max_num_entries, true}(
reader, version_tree_nodes)) {
return false;
}
TENSORSTORE_RETURN_IF_ERROR(
ValidateManifestVersionTreeNodes(
version_tree_arity_log2, last_generation_number, version_tree_nodes),
reader.Fail(_), false);
return true;
}
Result<absl::Cord> EncodeManifest(const Manifest& manifest,
bool encode_as_single) {
#ifndef NDEBUG
CheckManifestInvariants(manifest, encode_as_single);
#endif
return EncodeWithOptionalCompression(
manifest.config, kManifestMagic, kManifestFormatVersion,
[&](riegeli::Writer& writer) -> bool {
if (encode_as_single) {
Config new_config = manifest.config;
new_config.manifest_kind = ManifestKind::kSingle;
if (!ConfigCodec{}(writer, new_config)) return false;
} else {
if (!ConfigCodec{}(writer, manifest.config)) return false;
if (manifest.config.manifest_kind != ManifestKind::kSingle) {
return true;
}
}
DataFileTableBuilder data_file_table;
internal_ocdbt::AddDataFiles(data_file_table, manifest.versions);
internal_ocdbt::AddDataFiles(data_file_table,
manifest.version_tree_nodes);
if (!data_file_table.Finalize(writer)) return false;
if (!WriteVersionTreeNodeEntries(manifest.config, writer,
data_file_table, manifest.versions)) {
return false;
}
if (!VersionTreeInteriorNodeEntryArrayCodec<DataFileTableBuilder>{
data_file_table,
GetMaxVersionTreeHeight(
manifest.config.version_tree_arity_log2),
true}(writer, manifest.version_tree_nodes)) {
return false;
}
return true;
});
}
Result<Manifest> DecodeManifest(const absl::Cord& encoded) {
Manifest manifest;
auto status = DecodeWithOptionalCompression(
encoded, kManifestMagic, kManifestFormatVersion,
[&](riegeli::Reader& reader, uint32_t version) -> bool {
if (!ConfigCodec{}(reader, manifest.config)) return false;
if (manifest.config.manifest_kind != ManifestKind::kSingle) {
return true;
}
DataFileTable data_file_table;
if (!ReadDataFileTable(reader, {}, data_file_table)) {
return false;
}
if (!ReadVersionTreeLeafNode(manifest.config.version_tree_arity_log2,
reader, data_file_table,
manifest.versions)) {
return false;
}
if (!ReadManifestVersionTreeNodes(
reader, manifest.config.version_tree_arity_log2,
data_file_table, manifest.version_tree_nodes,
manifest.versions.back().generation_number)) {
return false;
}
return true;
});
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(status, "Error decoding manifest");
}
#ifndef NDEBUG
CheckManifestInvariants(manifest);
#endif
return manifest;
}
bool operator==(const Manifest& a, const Manifest& b) {
return a.config == b.config && a.versions == b.versions &&
a.version_tree_nodes == b.version_tree_nodes;
}
std::ostream& operator<<(std::ostream& os, const Manifest& e) {
os << "{config=" << e.config;
if (e.config.manifest_kind == ManifestKind::kSingle) {
os << ", versions=" << tensorstore::span(e.versions)
<< ", version_tree_nodes=" << tensorstore::span(e.version_tree_nodes);
}
return os << "}";
}
std::string GetManifestPath(std::string_view base_path) {
return tensorstore::StrCat(base_path, "manifest.ocdbt");
}
std::string GetNumberedManifestPath(std::string_view base_path,
GenerationNumber generation_number) {
return absl::StrFormat("%smanifest.%016x", base_path, generation_number);
}
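// For example, GetManifestPath("prefix/") returns "prefix/manifest.ocdbt", and
// GetNumberedManifestPath("prefix/", 15) returns
// "prefix/manifest.000000000000000f".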
#ifndef NDEBUG
void CheckManifestInvariants(const Manifest& manifest, bool assume_single) {
assert(manifest.config.version_tree_arity_log2 > 0);
assert(manifest.config.version_tree_arity_log2 <= kMaxVersionTreeArityLog2);
if (manifest.config.manifest_kind == ManifestKind::kSingle || assume_single) {
TENSORSTORE_CHECK_OK(ValidateVersionTreeLeafNodeEntries(
manifest.config.version_tree_arity_log2, manifest.versions));
TENSORSTORE_CHECK_OK(ValidateManifestVersionTreeNodes(
manifest.config.version_tree_arity_log2,
manifest.versions.back().generation_number,
manifest.version_tree_nodes));
} else {
assert(manifest.versions.empty());
assert(manifest.version_tree_nodes.empty());
}
}
#endif
}
} | #include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include <stdint.h>
#include <limits>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::DecodeManifest;
using ::tensorstore::internal_ocdbt::Manifest;
Result<absl::Time> RoundTripCommitTime(absl::Time time) {
TENSORSTORE_ASSIGN_OR_RETURN(auto commit_time,
CommitTime::FromAbslTime(time));
return static_cast<absl::Time>(commit_time);
}
TEST(CommitTimeTest, Simple) {
EXPECT_THAT(RoundTripCommitTime(absl::FromUnixNanos(0)),
::testing::Optional(absl::FromUnixNanos(0)));
EXPECT_THAT(RoundTripCommitTime(absl::FromUnixNanos(-1)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(RoundTripCommitTime(
absl::FromUnixNanos(std::numeric_limits<int64_t>::max())),
::testing::Optional(
absl::FromUnixNanos(std::numeric_limits<int64_t>::max())));
EXPECT_THAT(RoundTripCommitTime(
absl::FromUnixNanos(std::numeric_limits<int64_t>::max()) +
absl::Nanoseconds(1)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
void TestManifestRoundTrip(const Manifest& manifest) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeManifest(manifest));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, DecodeManifest(encoded));
EXPECT_EQ(manifest, decoded);
}
Manifest GetSimpleManifest() {
Manifest manifest;
auto& x = manifest.versions.emplace_back();
x.root.location.file_id.base_path = "abc";
x.root.location.file_id.relative_path = "defgh";
x.root.location.offset = 10;
x.root.location.length = 42;
x.generation_number = 1;
x.root.statistics.num_indirect_value_bytes = 101;
x.root.statistics.num_tree_bytes = 220;
x.root.statistics.num_keys = 8;
x.root_height = 0;
x.commit_time = CommitTime{1};
return manifest;
}
TEST(ManifestTest, RoundTrip) { TestManifestRoundTrip(GetSimpleManifest()); }
TEST(ManifestTest, RoundTripNonZeroHeight) {
Manifest manifest;
{
auto& x = manifest.versions.emplace_back();
x.root.location.file_id.base_path = "abc";
x.root.location.file_id.relative_path = "defgh";
x.root.location.offset = 10;
x.root.location.length = 42;
x.generation_number = 1;
x.root.statistics.num_indirect_value_bytes = 101;
x.root.statistics.num_tree_bytes = 220;
x.root.statistics.num_keys = 8;
x.root_height = 5;
x.commit_time = CommitTime{1};
}
TestManifestRoundTrip(manifest);
}
TEST(ManifestTest, CorruptMagic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeManifest(GetSimpleManifest()));
absl::Cord corrupt = encoded;
corrupt.RemovePrefix(4);
corrupt.Prepend("abcd");
EXPECT_THAT(DecodeManifest(corrupt),
MatchesStatus(
absl::StatusCode::kDataLoss,
".*: Expected to start with hex bytes .* but received: .*"));
}
TEST(ManifestTest, CorruptLength) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeManifest(GetSimpleManifest()));
auto corrupt = encoded;
corrupt.Append("x");
EXPECT_THAT(
DecodeManifest(corrupt),
MatchesStatus(absl::StatusCode::kDataLoss, ".*: Length in header .*"));
}
TEST(ManifestTest, InvalidVersion) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeManifest(GetSimpleManifest()));
auto corrupt = encoded.Subcord(0, 12);
corrupt.Append(std::string(1, 1));
corrupt.Append(encoded.Subcord(13, -1));
EXPECT_THAT(
DecodeManifest(corrupt),
MatchesStatus(absl::StatusCode::kDataLoss,
".*: Maximum supported version is 0 but received: 1.*"));
}
TEST(ManifestTest, CorruptChecksum) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeManifest(GetSimpleManifest()));
auto corrupt = encoded;
auto sv = corrupt.Flatten();
unsigned char final_char = sv.back();
++final_char;
corrupt.RemoveSuffix(1);
corrupt.Append(std::string(1, final_char));
EXPECT_THAT(DecodeManifest(corrupt),
MatchesStatus(absl::StatusCode::kDataLoss,
".*: CRC-32C checksum verification failed.*"));
}
TEST(ManifestTest, RoundTripMultipleVersions) {
Manifest manifest;
manifest.config.version_tree_arity_log2 = 1;
{
auto& x = manifest.versions.emplace_back();
x.root.location.file_id.base_path = "abc";
x.root.location.file_id.relative_path = "defgh";
x.root.location.offset = 10;
x.root.location.length = 42;
x.generation_number = 15;
x.root.statistics.num_indirect_value_bytes = 101;
x.root.statistics.num_tree_bytes = 220;
x.root.statistics.num_keys = 8;
x.root_height = 0;
x.commit_time = CommitTime{10};
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id.base_path = "abc";
x.location.file_id.relative_path = "defgh";
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 8;
x.height = 3;
x.commit_time = CommitTime{1};
x.num_generations = 8;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id.base_path = "abc";
x.location.file_id.relative_path = "defgh1";
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 12;
x.height = 2;
x.commit_time = CommitTime{5};
x.num_generations = 4;
}
{
auto& x = manifest.version_tree_nodes.emplace_back();
x.location.file_id.base_path = "abc1";
x.location.file_id.relative_path = "defgh";
x.location.offset = 10;
x.location.length = 42;
x.generation_number = 14;
x.height = 1;
x.commit_time = CommitTime{8};
x.num_generations = 2;
}
TestManifestRoundTrip(manifest);
}
namespace for_each_manifest_version_tree_node_ref {
using ::tensorstore::internal_ocdbt::ForEachManifestVersionTreeNodeRef;
using ::tensorstore::internal_ocdbt::GenerationNumber;
using ::tensorstore::internal_ocdbt::VersionTreeArityLog2;
using R = std::tuple<GenerationNumber, GenerationNumber, int>;
std::vector<R> GetRanges(GenerationNumber generation_number,
VersionTreeArityLog2 version_tree_arity_log2) {
std::vector<R> results;
ForEachManifestVersionTreeNodeRef(
generation_number, version_tree_arity_log2,
[&](GenerationNumber min_generation_number,
GenerationNumber max_generation_number, VersionTreeArityLog2 height) {
results.emplace_back(min_generation_number, max_generation_number,
height);
});
return results;
}
TEST(ForEachManifestVersionTreeNodeRefTest, SimpleCases) {
EXPECT_THAT(GetRanges(8, 2), ::testing::ElementsAre(R{1, 4, 1}));
EXPECT_THAT(GetRanges(9, 2), ::testing::ElementsAre(R{1, 8, 1}));
EXPECT_THAT(GetRanges(17, 2), ::testing::ElementsAre(R{1, 16, 1}));
EXPECT_THAT(GetRanges(30, 2),
::testing::ElementsAre(R{17, 28, 1}, R{1, 16, 2}));
EXPECT_THAT(GetRanges(43, 2),
::testing::ElementsAre(R{33, 40, 1}, R{1, 32, 2}));
EXPECT_THAT(GetRanges(17, 1),
::testing::ElementsAre(R{13, 16, 1}, R{9, 12, 2}, R{1, 8, 3}));
}
class ForEachManifestVersionTreeNodeRefPropertyTest
: public ::testing::TestWithParam<std::tuple<GenerationNumber, int>> {};
TEST_P(ForEachManifestVersionTreeNodeRefPropertyTest, Properties) {
auto [generation_number, version_tree_arity_log2] = GetParam();
auto range = GetRanges(generation_number, version_tree_arity_log2);
SCOPED_TRACE(
absl::StrFormat("generation_number=%d, version_tree_arity_log2=%d",
generation_number, version_tree_arity_log2));
SCOPED_TRACE(::testing::PrintToString(range));
for (size_t i = 0; i < range.size(); ++i) {
auto [min_gen, max_gen, height] = range[i];
SCOPED_TRACE(
absl::StrFormat("i=%d,height=%d,min_generation=%d,max_generation=%d", i,
height, min_gen, max_gen));
EXPECT_EQ(height, i + 1);
EXPECT_LT(max_gen, generation_number);
EXPECT_GT(max_gen, 0);
EXPECT_GT(min_gen, 0);
EXPECT_LT(min_gen, max_gen);
EXPECT_EQ(
0, max_gen % (GenerationNumber(1) << height * version_tree_arity_log2));
if (i == 0) {
EXPECT_GE(max_gen + (GenerationNumber(1) << version_tree_arity_log2),
generation_number);
}
if (i > 0) {
auto [prev_min_gen, prev_max_gen, prev_height] = range[i - 1];
EXPECT_EQ(prev_min_gen, max_gen + 1);
}
}
}
std::string PrintPropertyTestValue(
const ::testing::TestParamInfo<std::tuple<GenerationNumber, int>>& info) {
const auto [generation_number, version_tree_arity_log2] = info.param;
return absl::StrFormat("%d_%d", generation_number, version_tree_arity_log2);
}
INSTANTIATE_TEST_SUITE_P(
Combinations, ForEachManifestVersionTreeNodeRefPropertyTest,
::testing::Combine(::testing::ValuesIn<GenerationNumber>({
1,
2,
101,
12345,
567890,
}),
::testing::ValuesIn<int>({
1,
2,
3,
4,
})),
PrintPropertyTestValue);
INSTANTIATE_TEST_SUITE_P(
Simple, ForEachManifestVersionTreeNodeRefPropertyTest,
(::testing::ValuesIn<std::tuple<GenerationNumber, int>>({
{8, 2},
{9, 2},
{17, 2},
{43, 2},
{17, 1},
})),
PrintPropertyTestValue);
}
} |
606 | cpp | google/tensorstore | zip_dir_cache | tensorstore/kvstore/zip/zip_dir_cache.cc | tensorstore/kvstore/zip/zip_dir_cache_test.cc | #ifndef TENSORSTORE_KVSTORE_ZIP_ZIP_DIR_CACHE_H_
#define TENSORSTORE_KVSTORE_ZIP_ZIP_DIR_CACHE_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace internal_zip_kvstore {
struct Directory {
struct Entry {
std::string filename;
uint32_t crc;
uint64_t compressed_size;
uint64_t uncompressed_size;
uint64_t local_header_offset;
uint64_t estimated_size;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.filename, x.crc, x.compressed_size, x.uncompressed_size,
x.local_header_offset, x.estimated_size);
};
template <typename Sink>
friend void AbslStringify(Sink& sink, const Entry& entry) {
absl::Format(
&sink,
"Entry{filename=%s, crc=%d, compressed_size=%d, "
"uncompressed_size=%d, local_header_offset=%d, estimated_size=%d}",
entry.filename, entry.crc, entry.compressed_size,
entry.uncompressed_size, entry.local_header_offset,
entry.estimated_size);
}
};
std::vector<Entry> entries;
bool full_read;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.entries, x.full_read);
};
template <typename Sink>
  friend void AbslStringify(Sink& sink, const Directory& directory) {
absl::Format(&sink, "Directory{\n");
    for (const auto& entry : directory.entries) {
absl::Format(&sink, "%v\n", entry);
}
absl::Format(&sink, "}");
}
};
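// AsyncCache that reads and caches the central directory of a ZIP archive
// stored in a base kvstore. Cache entries are keyed by the archive's key in
// that kvstore, and decoding work runs on the supplied executor.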
class ZipDirectoryCache : public internal::AsyncCache {
using Base = internal::AsyncCache;
public:
using ReadData = Directory;
explicit ZipDirectoryCache(kvstore::DriverPtr kvstore_driver,
Executor executor)
: kvstore_driver_(std::move(kvstore_driver)),
executor_(std::move(executor)) {}
class Entry : public Base::Entry {
public:
using OwningCache = ZipDirectoryCache;
size_t ComputeReadDataSizeInBytes(const void* read_data) final;
void DoRead(AsyncCacheReadRequest request) final;
};
Entry* DoAllocateEntry() final;
size_t DoGetSizeofEntry() final;
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
ABSL_UNREACHABLE();
}
kvstore::DriverPtr kvstore_driver_;
Executor executor_;
const Executor& executor() { return executor_; }
};
}
}
#endif
#include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/compression/zip_details.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
namespace tensorstore {
namespace internal_zip_kvstore {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip");
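// State machine for a single directory read:
//   1. Read a block from the end of the archive and locate the EOCD record.
//   2. If the central directory is not contained in that block, issue a
//      second (generation-conditioned) read for its byte range.
//   3. Decode the central directory entries and publish them to the entry.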
struct ReadDirectoryOp
: public internal::AtomicReferenceCount<ReadDirectoryOp> {
ZipDirectoryCache::Entry* entry_;
std::shared_ptr<const Directory> existing_read_data_;
kvstore::ReadOptions options_;
internal_zip::ZipEOCD eocd_;
void StartEOCDBlockRead() {
auto& cache = internal::GetOwningCache(*entry_);
ABSL_LOG_IF(INFO, zip_logging)
<< "StartEOCDBlockRead " << entry_->key() << " " << options_.byte_range;
auto future =
cache.kvstore_driver_->Read(std::string(entry_->key()), options_);
future.Force();
future.ExecuteWhenReady(
[self = internal::IntrusivePtr<ReadDirectoryOp>(this)](
ReadyFuture<kvstore::ReadResult> ready) {
self->OnEOCDBlockRead(std::move(ready));
});
}
void OnEOCDBlockRead(ReadyFuture<kvstore::ReadResult> ready) {
auto& r = ready.result();
if (!r.ok()) {
ABSL_LOG_IF(INFO, zip_logging) << r.status();
if (absl::IsOutOfRange(r.status())) {
assert(!options_.byte_range.IsFull());
options_.byte_range = OptionalByteRangeRequest{};
StartEOCDBlockRead();
return;
}
entry_->ReadError(
internal::ConvertInvalidArgumentToFailedPrecondition(r.status()));
return;
}
auto& read_result = *r;
if (read_result.aborted()) {
entry_->ReadSuccess(ZipDirectoryCache::ReadState{
entry_->read_request_state_.read_state.data,
std::move(read_result.stamp)});
return;
}
if (read_result.not_found()) {
entry_->ReadError(absl::NotFoundError(""));
return;
}
GetOwningCache(*entry_).executor()(
[self = internal::IntrusivePtr<ReadDirectoryOp>(this),
ready = std::move(ready)]() {
self->DoDecodeEOCDBlock(std::move(ready));
});
}
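  // Parses the EOCD record from the block just read. Depending on the
  // result, this either retries with a larger suffix read, decodes the
  // central directory from the same block, or issues a conditional read for
  // the central directory byte range [cd_offset, cd_offset + cd_size).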
void DoDecodeEOCDBlock(ReadyFuture<kvstore::ReadResult> ready) {
absl::Cord* eocd_block = &ready.value().value;
riegeli::CordReader<absl::Cord*> reader(eocd_block);
int64_t block_offset =
options_.byte_range.IsFull() ? 0 : options_.byte_range.inclusive_min;
auto read_eocd_variant = TryReadFullEOCD(reader, eocd_, block_offset);
if (auto* status = std::get_if<absl::Status>(&read_eocd_variant);
status != nullptr && !status->ok()) {
entry_->ReadError(std::move(*status));
return;
}
if (auto* inclusive_min = std::get_if<int64_t>(&read_eocd_variant);
inclusive_min != nullptr) {
assert(!options_.byte_range.IsFull());
options_.byte_range = OptionalByteRangeRequest::Suffix(*inclusive_min);
StartEOCDBlockRead();
return;
}
if (block_offset >= 0 && block_offset <= eocd_.cd_offset) {
DoDecodeDirectory(std::move(ready), eocd_.cd_offset - block_offset);
return;
}
kvstore::ReadOptions other_options = options_;
other_options.generation_conditions.if_equal =
ready.value().stamp.generation;
other_options.byte_range = OptionalByteRangeRequest::Range(
eocd_.cd_offset, eocd_.cd_offset + eocd_.cd_size);
auto& cache = internal::GetOwningCache(*entry_);
auto future =
cache.kvstore_driver_->Read(std::string(entry_->key()), other_options);
future.Force();
future.ExecuteWhenReady(
[self = internal::IntrusivePtr<ReadDirectoryOp>(this)](
ReadyFuture<kvstore::ReadResult> ready) {
self->OnDirectoryBlockRead(std::move(ready));
});
}
void OnDirectoryBlockRead(ReadyFuture<kvstore::ReadResult> ready) {
auto& r = ready.result();
if (!r.ok()) {
ABSL_LOG_IF(INFO, zip_logging) << r.status();
entry_->ReadError(
internal::ConvertInvalidArgumentToFailedPrecondition(r.status()));
return;
}
auto& read_result = *r;
if (read_result.aborted() || read_result.not_found() ||
!ready.value().has_value()) {
entry_->ReadError(
absl::InvalidArgumentError("Faild to read ZIP directory"));
return;
}
GetOwningCache(*entry_).executor()(
[self = internal::IntrusivePtr<ReadDirectoryOp>(this),
ready = std::move(ready)]() {
self->DoDecodeDirectory(std::move(ready), 0);
});
}
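  // Decodes central directory entries starting at `seek_pos`, estimates each
  // entry's size from the gap to the next local header offset, and stores the
  // resulting directory (sorted by filename) in the cache entry.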
void DoDecodeDirectory(ReadyFuture<kvstore::ReadResult> ready,
size_t seek_pos) {
absl::Cord* cd_block = &ready.value().value;
riegeli::CordReader<absl::Cord*> reader(cd_block);
if (seek_pos > 0) {
reader.Seek(seek_pos);
}
Directory dir{};
dir.full_read = options_.byte_range.IsFull();
dir.entries.reserve(eocd_.num_entries);
for (size_t i = 0; i < eocd_.num_entries; ++i) {
internal_zip::ZipEntry entry{};
if (auto entry_status = ReadCentralDirectoryEntry(reader, entry);
!entry_status.ok()) {
entry_->ReadError(entry_status);
return;
}
if (ValidateEntryIsSupported(entry).ok()) {
ABSL_LOG_IF(INFO, zip_logging) << "Adding " << entry;
dir.entries.push_back(
Directory::Entry{entry.filename, entry.crc, entry.compressed_size,
entry.uncompressed_size, entry.local_header_offset,
entry.estimated_read_size});
} else {
ABSL_LOG_IF(INFO, zip_logging) << "Skipping " << entry;
}
}
std::sort(dir.entries.begin(), dir.entries.end(),
[](const auto& a, const auto& b) {
return std::tie(a.local_header_offset, a.filename) <
std::tie(b.local_header_offset, b.filename);
});
auto last_header_offset = eocd_.cd_offset;
for (auto it = dir.entries.rbegin(); it != dir.entries.rend(); ++it) {
it->estimated_size = last_header_offset - it->local_header_offset;
last_header_offset = it->local_header_offset;
}
std::sort(dir.entries.begin(), dir.entries.end(),
[](const auto& a, const auto& b) {
return std::tie(a.filename, a.local_header_offset) <
                       std::tie(b.filename, b.local_header_offset);
});
ABSL_LOG_IF(INFO, zip_logging) << dir;
entry_->ReadSuccess(ZipDirectoryCache::ReadState{
std::make_shared<const Directory>(std::move(dir)),
std::move(ready.value().stamp)});
}
};
}
size_t ZipDirectoryCache::Entry::ComputeReadDataSizeInBytes(
const void* read_data) {
return internal::EstimateHeapUsage(*static_cast<const ReadData*>(read_data));
}
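// Starts a directory read. If a previous read already fetched the entire
// archive, request the full value again; otherwise request only the trailing
// kEOCDBlockSize bytes.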
void ZipDirectoryCache::Entry::DoRead(AsyncCacheReadRequest request) {
auto state = internal::MakeIntrusivePtr<ReadDirectoryOp>();
state->entry_ = this;
{
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*this);
state->existing_read_data_ = lock.shared_data();
state->options_.generation_conditions.if_not_equal =
lock.read_state().stamp.generation;
}
state->options_.staleness_bound = request.staleness_bound;
if (state->existing_read_data_ && state->existing_read_data_->full_read) {
state->options_.byte_range = OptionalByteRangeRequest{};
} else {
state->options_.byte_range =
OptionalByteRangeRequest::SuffixLength(internal_zip::kEOCDBlockSize);
}
state->StartEOCDBlockRead();
}
ZipDirectoryCache::Entry* ZipDirectoryCache::DoAllocateEntry() {
return new Entry;
}
size_t ZipDirectoryCache::DoGetSizeofEntry() { return sizeof(Entry); }
}
} | #include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Context;
using ::tensorstore::InlineExecutor;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal_zip_kvstore::Directory;
using ::tensorstore::internal_zip_kvstore::ZipDirectoryCache;
using ::tensorstore::kvstore::DriverPtr;
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
absl::Cord GetTestZipFileData() {
ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
absl::Cord filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(
riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
ABSL_CHECK_EQ(filedata.size(), 319482);
return filedata;
}
TEST(ZipDirectoryKvsTest, Basic) {
auto context = Context::Default();
auto pool = CachePool::Make(CachePool::Limits{});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
ASSERT_THAT(
tensorstore::kvstore::Write(memory, "data.zip", GetTestZipFileData())
.result(),
::tensorstore::IsOk());
auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
});
auto entry = GetCacheEntry(cache, "data.zip");
auto status = entry->Read({absl::InfinitePast()}).status();
ASSERT_THAT(status, ::tensorstore::IsOk());
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*entry);
auto* dir = lock.data();
ASSERT_THAT(dir, ::testing::NotNull());
ASSERT_THAT(dir->entries, ::testing::SizeIs(3));
EXPECT_THAT(dir->entries[0].filename, "data/a.png");
EXPECT_THAT(dir->entries[1].filename, "data/bb.png");
EXPECT_THAT(dir->entries[2].filename, "data/c.png");
}
TEST(ZipDirectoryKvsTest, MissingEntry) {
auto context = Context::Default();
auto pool = CachePool::Make(CachePool::Limits{});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
});
auto entry = GetCacheEntry(cache, "data.zip");
auto status = entry->Read({absl::InfinitePast()}).status();
EXPECT_THAT(status, ::tensorstore::StatusIs(absl::StatusCode::kNotFound));
}
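// Minimal ZIP archive used by the MinimalZip test below. It contains three
// records ("test", "testdir/", and "testdir/test2"); the "testdir/" directory
// placeholder is not expected to appear in the parsed result.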
static constexpr unsigned char kZipTest2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54,
0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x04, 0x00, 0x64, 0x00, 0x14, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98,
0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x9a,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x0d, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03,
0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00,
0xe8, 0x03, 0x64, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x3c, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0x09,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x77, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55,
0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
0xca, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
};
TEST(ZipDirectoryKvsTest, MinimalZip) {
auto context = Context::Default();
auto pool = CachePool::Make(CachePool::Limits{});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context).result());
ASSERT_THAT(tensorstore::kvstore::Write(
memory, "data.zip",
absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2)),
[](auto) {}))
.result(),
::tensorstore::IsOk());
auto cache = GetCache<ZipDirectoryCache>(pool.get(), "", [&] {
return std::make_unique<ZipDirectoryCache>(memory.driver, InlineExecutor{});
});
auto entry = GetCacheEntry(cache, "data.zip");
auto status = entry->Read({absl::InfinitePast()}).status();
ASSERT_THAT(status, ::tensorstore::IsOk());
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(*entry);
auto* dir = lock.data();
ASSERT_THAT(dir, ::testing::NotNull());
ASSERT_THAT(dir->entries, ::testing::SizeIs(2));
EXPECT_THAT(dir->entries[0].filename, "test");
EXPECT_THAT(dir->entries[1].filename, "testdir/test2");
}
} |
607 | cpp | google/tensorstore | neuroglancer_uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded_test.cc | #ifndef TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_NEUROGLANCER_UINT64_SHARDED_H_
#define TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_NEUROGLANCER_UINT64_SHARDED_H_
#include <stdint.h>
#include <functional>
#include <optional>
#include <string>
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
using GetMaxChunksPerShardFunction = std::function<uint64_t(uint64_t)>;
kvstore::DriverPtr GetShardedKeyValueStore(
kvstore::DriverPtr base_kvstore, Executor executor, std::string key_prefix,
const ShardingSpec& sharding_spec, internal::CachePool::WeakPtr cache_pool,
GetMaxChunksPerShardFunction get_max_chunks_per_shard = {});
std::string ChunkIdToKey(ChunkId chunk_id);
std::optional<ChunkId> KeyToChunkId(std::string_view key);
}
}
#endif
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
using ::tensorstore::internal::ConvertInvalidArgumentToFailedPrecondition;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
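// kvstore adapter that exposes minishard indexes as values. Keys are binary
// ChunkCombinedShardInfo values identifying a (shard, minishard) pair; a read
// first fetches the 16-byte shard index entry to locate the minishard index,
// then reads that byte range from the shard file.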
class MinishardIndexKeyValueStore : public kvstore::Driver {
public:
explicit MinishardIndexKeyValueStore(kvstore::DriverPtr base,
Executor executor,
std::string key_prefix,
const ShardingSpec& sharding_spec)
: base_(std::move(base)),
executor_(std::move(executor)),
key_prefix_(key_prefix),
sharding_spec_(sharding_spec) {}
Future<ReadResult> Read(Key key, ReadOptions options) override;
std::string DescribeKey(std::string_view key) override {
ChunkCombinedShardInfo combined_info;
if (key.size() != sizeof(combined_info)) {
return tensorstore::StrCat("invalid key ", tensorstore::QuoteString(key));
}
std::memcpy(&combined_info, key.data(), sizeof(combined_info));
auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
return tensorstore::StrCat(
"minishard ", split_info.minishard, " in ",
base_->DescribeKey(
GetShardKey(sharding_spec_, key_prefix_, split_info.shard)));
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final {
}
kvstore::Driver* base() { return base_.get(); }
const ShardingSpec& sharding_spec() { return sharding_spec_; }
const std::string& key_prefix() const { return key_prefix_; }
const Executor& executor() const { return executor_; }
kvstore::DriverPtr base_;
Executor executor_;
std::string key_prefix_;
ShardingSpec sharding_spec_;
};
namespace {
using ShardIndex = uint64_t;
using MinishardIndex = uint64_t;
class MinishardIndexReadOperationState;
using MinishardIndexReadOperationStateBase =
internal_kvstore_batch::BatchReadEntry<
MinishardIndexKeyValueStore,
internal_kvstore_batch::ReadRequest<MinishardIndex>,
ShardIndex, kvstore::ReadGenerationConditions>;
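// Batch entry that coalesces minishard-index reads for a single shard. Each
// request reads the corresponding shard index entry and then the minishard
// index bytes; if the shard changes between the two reads, the request is
// retried via `retry_batch_`.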
class MinishardIndexReadOperationState
: public MinishardIndexReadOperationStateBase,
public internal::AtomicReferenceCount<MinishardIndexReadOperationState> {
public:
explicit MinishardIndexReadOperationState(BatchEntryKey&& batch_entry_key_)
: MinishardIndexReadOperationStateBase(std::move(batch_entry_key_)),
internal::AtomicReferenceCount<MinishardIndexReadOperationState>(
1) {}
private:
Batch retry_batch_{no_batch};
void Submit(Batch::View batch) override {
const auto& executor = driver().executor();
executor(
[this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
}
void ProcessBatch(Batch batch) {
internal::IntrusivePtr<MinishardIndexReadOperationState> self(
this, internal::adopt_object_ref);
retry_batch_ = Batch::New();
auto minishard_fetch_batch = Batch::New();
for (auto& request : request_batch.requests) {
ProcessMinishard(batch, request, minishard_fetch_batch);
}
}
std::string ShardKey() {
const auto& sharding_spec = driver().sharding_spec();
return GetShardKey(sharding_spec, driver().key_prefix(),
std::get<ShardIndex>(batch_entry_key));
}
void ProcessMinishard(Batch::View batch, Request& request,
Batch minishard_fetch_batch) {
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions =
std::get<kvstore::ReadGenerationConditions>(this->batch_entry_key);
kvstore_read_options.staleness_bound = this->request_batch.staleness_bound;
auto key = std::get<MinishardIndex>(request);
kvstore_read_options.byte_range = OptionalByteRangeRequest{
static_cast<int64_t>(key * 16), static_cast<int64_t>((key + 1) * 16)};
kvstore_read_options.batch = batch;
auto shard_index_read_future = this->driver().base()->Read(
this->ShardKey(), std::move(kvstore_read_options));
shard_index_read_future.Force();
shard_index_read_future.ExecuteWhenReady(
[self = internal::IntrusivePtr<MinishardIndexReadOperationState>(this),
minishard_fetch_batch = std::move(minishard_fetch_batch),
&request](ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), &request,
minishard_fetch_batch = std::move(minishard_fetch_batch),
future = std::move(future)] {
OnShardIndexReady(std::move(self), request,
std::move(minishard_fetch_batch),
std::move(future.result()));
});
});
}
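  // Invoked with the 16-byte shard index entry for one minishard. An empty
  // byte range means the minishard is missing; otherwise a read conditioned
  // on the same shard generation is issued for the minishard index itself.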
static void OnShardIndexReady(
internal::IntrusivePtr<MinishardIndexReadOperationState> self,
Request& request, Batch minishard_fetch_batch,
Result<kvstore::ReadResult>&& result) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
const auto set_error = [&](absl::Status status) {
byte_range_request.promise.SetResult(MaybeAnnotateStatus(
ConvertInvalidArgumentToFailedPrecondition(std::move(status)),
"Error retrieving shard index entry"));
};
TENSORSTORE_ASSIGN_OR_RETURN(auto&& read_result, result,
set_error(std::move(_)));
    if (read_result.aborted() || read_result.not_found()) {
byte_range_request.promise.SetResult(std::move(read_result));
return;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range, DecodeShardIndexEntry(read_result.value.Flatten()),
set_error(std::move(_)));
TENSORSTORE_ASSIGN_OR_RETURN(
byte_range,
GetAbsoluteShardByteRange(byte_range, self->driver().sharding_spec()),
set_error(std::move(_)));
if (byte_range.size() == 0) {
read_result.value.Clear();
read_result.state = kvstore::ReadResult::kMissing;
byte_range_request.promise.SetResult(std::move(read_result));
return;
}
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions.if_equal =
std::move(read_result.stamp.generation);
kvstore_read_options.staleness_bound = self->request_batch.staleness_bound;
kvstore_read_options.byte_range = byte_range;
kvstore_read_options.batch = std::move(minishard_fetch_batch);
auto read_future = self->driver().base()->Read(
self->ShardKey(), std::move(kvstore_read_options));
read_future.Force();
read_future.ExecuteWhenReady(
[self = std::move(self),
&request](ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), &request,
future = std::move(future)]() mutable {
self->OnMinishardIndexReadReady(request,
std::move(future.result()));
});
});
}
void OnMinishardIndexReadReady(Request& request,
Result<kvstore::ReadResult>&& result) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
TENSORSTORE_ASSIGN_OR_RETURN(
auto&& read_result, result,
static_cast<void>(byte_range_request.promise.SetResult(
internal::ConvertInvalidArgumentToFailedPrecondition(
std::move(_)))));
if (read_result.aborted()) {
MakeRequest<MinishardIndexReadOperationState>(
driver(), std::get<ShardIndex>(batch_entry_key),
kvstore::ReadGenerationConditions(
std::get<kvstore::ReadGenerationConditions>(batch_entry_key)),
retry_batch_, read_result.stamp.time, std::move(request));
return;
}
byte_range_request.promise.SetResult(std::move(read_result));
}
};
}
Future<kvstore::ReadResult> MinishardIndexKeyValueStore::Read(
Key key, ReadOptions options) {
ChunkCombinedShardInfo combined_info;
if (key.size() != sizeof(combined_info)) {
return absl::InvalidArgumentError("Key does not specify a minishard");
}
std::memcpy(&combined_info, key.data(), sizeof(combined_info));
auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
if (options.byte_range != OptionalByteRangeRequest()) {
return absl::InvalidArgumentError("Byte ranges not supported");
}
auto [promise, future] = PromiseFuturePair<ReadResult>::Make();
MinishardIndexReadOperationState::MakeRequest<
MinishardIndexReadOperationState>(
*this, split_info.shard, std::move(options.generation_conditions),
options.batch, options.staleness_bound,
MinishardIndexReadOperationState::Request{{std::move(promise)},
split_info.minishard});
return std::move(future);
}
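// AsyncCache of decoded minishard indexes (vectors of MinishardIndexEntry),
// keyed by the combined shard/minishard identifier and backed by
// MinishardIndexKeyValueStore.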
class MinishardIndexCache
: public internal::KvsBackedCache<MinishardIndexCache,
internal::AsyncCache> {
using Base =
internal::KvsBackedCache<MinishardIndexCache, internal::AsyncCache>;
public:
using ReadData = std::vector<MinishardIndexEntry>;
class Entry : public Base::Entry {
public:
using OwningCache = MinishardIndexCache;
ChunkSplitShardInfo shard_info() {
ChunkCombinedShardInfo combined_info;
assert(this->key().size() == sizeof(combined_info));
std::memcpy(&combined_info, this->key().data(), sizeof(combined_info));
return GetSplitShardInfo(GetOwningCache(*this).sharding_spec(),
combined_info);
}
size_t ComputeReadDataSizeInBytes(const void* read_data) override {
return internal::EstimateHeapUsage(
*static_cast<const ReadData*>(read_data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
std::shared_ptr<ReadData> read_data;
if (value) {
if (auto result = DecodeMinishardIndexAndAdjustByteRanges(
*value, GetOwningCache(*this).sharding_spec());
result.ok()) {
read_data = std::make_shared<ReadData>(std::move(*result));
} else {
execution::set_error(receiver,
ConvertInvalidArgumentToFailedPrecondition(
std::move(result).status()));
return;
}
}
execution::set_value(receiver, std::move(read_data));
});
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
explicit MinishardIndexCache(kvstore::DriverPtr base_kvstore,
Executor executor, std::string key_prefix,
const ShardingSpec& sharding_spec)
: Base(kvstore::DriverPtr(new MinishardIndexKeyValueStore(
std::move(base_kvstore), executor, std::move(key_prefix),
sharding_spec))) {}
MinishardIndexKeyValueStore* kvstore_driver() {
return static_cast<MinishardIndexKeyValueStore*>(
this->Base::kvstore_driver());
}
const ShardingSpec& sharding_spec() {
return kvstore_driver()->sharding_spec();
}
kvstore::Driver* base_kvstore_driver() { return kvstore_driver()->base(); }
const Executor& executor() { return kvstore_driver()->executor(); }
const std::string& key_prefix() { return kvstore_driver()->key_prefix(); }
};
MinishardAndChunkId GetMinishardAndChunkId(std::string_view key) {
assert(key.size() == 16);
return {absl::big_endian::Load64(key.data()),
{absl::big_endian::Load64(key.data() + 8)}};
}
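// Cache used for transactional writes. Each entry corresponds to one shard
// and holds its decoded chunks; transaction nodes merge per-chunk mutations
// and write back the re-encoded shard atomically.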
class ShardedKeyValueStoreWriteCache
: public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache>;
public:
using ReadData = EncodedChunks;
static std::string ShardToKey(ShardIndex shard) {
std::string key;
key.resize(sizeof(ShardIndex));
absl::big_endian::Store64(key.data(), shard);
return key;
}
static ShardIndex KeyToShard(std::string_view key) {
assert(key.size() == sizeof(ShardIndex));
return absl::big_endian::Load64(key.data());
}
class Entry : public Base::Entry {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
ShardIndex shard() { return KeyToShard(key()); }
size_t ComputeReadDataSizeInBytes(const void* data) override {
return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
EncodedChunks chunks;
if (value) {
if (auto result =
SplitShard(GetOwningCache(*this).sharding_spec(), *value);
result.ok()) {
chunks = std::move(*result);
} else {
execution::set_error(receiver,
ConvertInvalidArgumentToFailedPrecondition(
std::move(result).status()));
return;
}
}
execution::set_value(
receiver, std::make_shared<EncodedChunks>(std::move(chunks)));
});
}
void DoEncode(std::shared_ptr<const EncodedChunks> data,
EncodeReceiver receiver) override {
execution::set_value(
receiver, EncodeShard(GetOwningCache(*this).sharding_spec(), *data));
}
std::string GetKeyValueStoreKey() override {
auto& cache = GetOwningCache(*this);
return GetShardKey(cache.sharding_spec(), cache.key_prefix(),
this->shard());
}
};
class TransactionNode : public Base::TransactionNode,
public internal_kvstore::AtomicMultiPhaseMutation {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
using Base::TransactionNode::TransactionNode;
absl::Mutex& mutex() override { return this->mutex_; }
void PhaseCommitDone(size_t next_phase) override {}
internal::TransactionState::Node& GetTransactionNode() override {
return *this;
}
void Abort() override {
this->AbortRemainingPhases();
Base::TransactionNode::Abort();
}
std::string DescribeKey(std::string_view key) override {
auto& entry = GetOwningEntry(*this);
auto& cache = GetOwningCache(entry);
auto minishard_and_chunk_id = GetMinishardAndChunkId(key);
return tensorstore::StrCat(
"chunk ", minishard_and_chunk_id.chunk_id.value, " in minishard ",
minishard_and_chunk_id.minishard, " in ",
cache.kvstore_driver()->DescribeKey(entry.GetKeyValueStoreKey()));
}
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
void RecordEntryWritebackError(
internal_kvstore::ReadModifyWriteEntry& entry,
absl::Status error) override {
absl::MutexLock lock(&mutex_);
if (apply_status_.ok()) {
apply_status_ = std::move(error);
}
}
void Revoke() override {
Base::TransactionNode::Revoke();
{ UniqueWriterLock(*this); }
this->RevokeAllEntries();
}
void WritebackSuccess(ReadState&& read_state) override;
void WritebackError() override;
void InvalidateReadState() override;
bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
void Read(
internal_kvstore::ReadModifyWriteEntry& entry,
kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
this->AsyncCache::TransactionNode::Read({options.staleness_bound})
.ExecuteWhenReady(WithExecutor(
GetOwningCache(*this).executor(),
[&entry,
if_not_equal =
std::move(options.generation_conditions.if_not_equal),
receiver = std::move(receiver)](
ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
execution::set_error(receiver, future.result().status());
return;
}
execution::submit(HandleShardReadSuccess(entry, if_not_equal),
receiver);
}));
}
static Result<kvstore::ReadResult> HandleShardReadSuccess(
internal_kvstore::ReadModifyWriteEntry& entry,
const StorageGeneration& if_not_equal) {
auto& self = static_cast<TransactionNode&>(entry.multi_phase());
TimestampedStorageGeneration stamp;
std::shared_ptr<const EncodedChunks> encoded_chunks;
{
AsyncCache::ReadLock<EncodedChunks> lock{self};
stamp = lock.stamp();
encoded_chunks = lock.shared_data();
}
if (!StorageGeneration::IsUnknown(stamp.generation) &&
stamp.generation == if_not_equal) {
return kvstore::ReadResult::Unspecified(std::move(stamp));
}
if (StorageGeneration::IsDirty(stamp.generation)) {
stamp.generation =
StorageGeneration::AddLayer(std::move(stamp.generation));
}
auto* chunk =
FindChunk(*encoded_chunks, GetMinishardAndChunkId(entry.key_));
if (!chunk) {
return kvstore::ReadResult::Missing(std::move(stamp));
} else {
TENSORSTORE_ASSIGN_OR_RETURN(
absl::Cord value,
DecodeData(chunk->encoded_data,
GetOwningCache(self).sharding_spec().data_encoding));
return kvstore::ReadResult::Value(std::move(value), std::move(stamp));
}
}
void Writeback(internal_kvstore::ReadModifyWriteEntry& entry,
internal_kvstore::ReadModifyWriteEntry& source_entry,
kvstore::ReadResult&& read_result) override {
auto& value = read_result.value;
if (read_result.state == kvstore::ReadResult::kValue) {
value = EncodeData(value,
GetOwningCache(*this).sharding_spec().data_encoding);
}
internal_kvstore::AtomicMultiPhaseMutation::Writeback(
entry, entry, std::move(read_result));
}
ApplyReceiver apply_receiver_;
ApplyOptions apply_options_;
absl::Status apply_status_;
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
explicit ShardedKeyValueStoreWriteCache(
internal::CachePtr<MinishardIndexCache> minishard_index_cache,
GetMaxChunksPerShardFunction get_max_chunks_per_shard)
: Base(kvstore::DriverPtr(minishard_index_cache->base_kvstore_driver())),
minishard_index_cache_(std::move(minishard_index_cache)),
get_max_chunks_per_shard_(std::move(get_max_chunks_per_shard)) {}
const ShardingSpec& sharding_spec() const {
return minishard_index_cache()->s | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace zlib = ::tensorstore::zlib;
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Future;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultAborted;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::neuroglancer_uint64_sharded::ChunkIdToKey;
using ::tensorstore::neuroglancer_uint64_sharded::GetShardedKeyValueStore;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
return absl::Cord(std::string(x.begin(), x.end()));
}
std::string GetChunkKey(uint64_t chunk_id) { return ChunkIdToKey({chunk_id}); }
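// Maps arbitrary string test keys to distinct uint64 chunk ids (sequentially
// or randomly) so the generic kvstore test suite can exercise the sharded
// store.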
class GetUint64Key {
public:
GetUint64Key(bool sequential) : sequential_(sequential) {}
std::string operator()(std::string key) const {
auto it = key_to_uint64_.find(key);
if (it == key_to_uint64_.end()) {
while (true) {
auto x = sequential_ ? next_chunk_id_++ : absl::Uniform<uint64_t>(gen_);
if (uint64_to_key_.emplace(x, key).second) {
it = key_to_uint64_.emplace(key, x).first;
break;
}
}
}
return GetChunkKey(it->second);
}
private:
bool sequential_;
mutable uint64_t next_chunk_id_ = 0;
mutable absl::BitGen gen_;
mutable absl::flat_hash_map<std::string, uint64_t> key_to_uint64_;
mutable absl::flat_hash_map<uint64_t, std::string> uint64_to_key_;
};
tensorstore::Executor GetExecutor(std::string_view executor_name) {
if (executor_name == "inline") return tensorstore::InlineExecutor{};
return tensorstore::internal::DetachedThreadPool(2);
}
struct BasicFunctionalityTestOptions {
std::string_view executor_name = "thread_pool";
bool sequential_ids = false;
std::string_view hash = "identity";
std::string_view data_encoding = "raw";
std::string_view minishard_index_encoding = "raw";
bool all_zero_bits = false;
};
void TestReadWriteOps(BasicFunctionalityTestOptions options) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", options.hash},
{"preshift_bits", options.all_zero_bits ? 0 : 1},
{"minishard_bits", options.all_zero_bits ? 0 : 2},
{"shard_bits", options.all_zero_bits ? 0 : 3},
{"data_encoding", options.data_encoding},
{"minishard_index_encoding", options.minishard_index_encoding}};
auto cache_pool = CachePool::Make(kSmallCacheLimits);
auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
auto sharding_spec = ShardingSpec::FromJson(sharding_spec_json).value();
SCOPED_TRACE(options.executor_name);
SCOPED_TRACE(sharding_spec_json.dump());
auto store = GetShardedKeyValueStore(
base_kv_store, GetExecutor(options.executor_name), "prefix",
sharding_spec, CachePool::WeakPtr(cache_pool));
GetUint64Key get_key_fn(options.sequential_ids);
tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
}
TEST(Uint64ShardedKeyValueStoreTest, BasicFunctionality) {
{
BasicFunctionalityTestOptions options;
TestReadWriteOps(options);
options.sequential_ids = true;
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.hash = "murmurhash3_x86_128";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.data_encoding = "gzip";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.minishard_index_encoding = "gzip";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.all_zero_bits = true;
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.executor_name = "inline";
TestReadWriteOps(options);
}
}
TEST(Uint64ShardedKeyValueStoreTest, DescribeKey) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
for (const auto& [key, description] :
std::vector<std::pair<uint64_t, std::string>>{
{0, "chunk 0 in minishard 0 in \"prefix/0.shard\""},
{1, "chunk 1 in minishard 1 in \"prefix/0.shard\""},
{2, "chunk 2 in minishard 0 in \"prefix/1.shard\""},
{3, "chunk 3 in minishard 1 in \"prefix/1.shard\""},
}) {
EXPECT_EQ(description, store->DescribeKey(GetChunkKey(key)));
}
}
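// Fixture for tests using raw (uncompressed) data and minishard index
// encoding with a single shard and minishard (all bit counts are zero).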
class RawEncodingTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 0},
{"shard_bits", 0},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
};
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
absl::Cord("efgh")};
std::vector<Future<TimestampedStorageGeneration>> futures;
auto key = GetChunkKey(10);
tensorstore::Transaction txn(tensorstore::isolated);
for (auto value : values) {
futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
}
txn.CommitAsync().IgnoreFuture();
std::vector<Result<TimestampedStorageGeneration>> results;
for (const auto& future : futures) {
results.push_back(future.result());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto shard_read, base_kv_store->Read("prefix/0.shard").result());
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
for (size_t i = 0; i < results.size(); ++i) {
if (results[i] && results[i]->generation == shard_read.stamp.generation) {
EXPECT_THAT(store->Read(key).result(),
MatchesKvsReadResult(values[i], results[i]->generation));
}
}
}
TEST_F(RawEncodingTest, List) {
std::map<std::string, absl::Cord> values{
{GetChunkKey(1), absl::Cord("a")},
{GetChunkKey(2), absl::Cord("bc")},
{GetChunkKey(3), absl::Cord("def")},
{GetChunkKey(10), absl::Cord("xyz")}};
for (auto [key, value] : values) {
TENSORSTORE_EXPECT_OK(store->Write(key, value));
}
EXPECT_THAT(tensorstore::internal::GetMap(store),
::testing::Optional(::testing::ElementsAreArray(values)));
}
TEST_F(RawEncodingTest, WritesAndDeletes) {
StorageGeneration gen1, gen2, gen3;
{
tensorstore::Transaction txn(tensorstore::isolated);
auto init_future1 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(1), absl::Cord("a"));
auto init_future2 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(2), absl::Cord("bc"));
auto init_future3 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(3), absl::Cord("def"));
txn.CommitAsync().IgnoreFuture();
gen1 = init_future1.value().generation;
gen2 = init_future2.value().generation;
gen3 = init_future3.value().generation;
}
tensorstore::Transaction txn(tensorstore::isolated);
auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()});
auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
absl::Cord("ww"), {gen2});
auto future3 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
absl::Cord("xx"), {gen2});
auto future4 =
kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(4),
absl::Cord("zz"), {StorageGeneration::NoValue()});
auto future5 =
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(3), {gen3});
txn.CommitAsync().IgnoreFuture();
EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto shard_read, base_kv_store->Read("prefix/0.shard").result());
EXPECT_THAT(
std::vector({future2.result(), future3.result()}),
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResult(absl::Cord("a")));
EXPECT_THAT(store->Read(GetChunkKey(2)).result(),
MatchesKvsReadResult(
!StorageGeneration::IsUnknown(future2.result()->generation)
? absl::Cord("ww")
: absl::Cord("xx")));
EXPECT_THAT(store->Read(GetChunkKey(3)).result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(store->Read(GetChunkKey(4)).result(),
MatchesKvsReadResult(absl::Cord("zz")));
}
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
std::function<void()> init,
std::function<Future<TimestampedStorageGeneration>()> op0,
std::function<Future<TimestampedStorageGeneration>()> op1,
std::function<void()> finalize) {
std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
for (int i = 0; i < 2; ++i) {
std::vector<Future<TimestampedStorageGeneration>> futures(2);
init();
if (i == 0) {
futures[0] = op0();
futures[1] = op1();
} else {
futures[1] = op1();
futures[0] = op0();
}
finalize();
all_results.push_back({futures[0].result(), futures[1].result()});
}
return all_results;
}
TEST_F(RawEncodingTest, WriteThenDelete) {
TENSORSTORE_ASSERT_OK(store->Write(GetChunkKey(1), absl::Cord("a")).result());
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResult(absl::Cord("a")));
TENSORSTORE_ASSERT_OK(store->Delete(GetChunkKey(1)).result());
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResultNotFound());
}
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
StorageGeneration gen;
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
gen = store->Write(GetChunkKey(1), absl::Cord("a"))
.value()
.generation;
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{gen});
},
[&] {
return kvstore::DeleteCommitted(
KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::UnorderedElementsAre(
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(
StorageGeneration::NoValue())),
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
store->Delete(GetChunkKey(0)).value();
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0),
absl::Cord("a"));
},
[&] {
return kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(0), absl::Cord("b"),
{StorageGeneration::FromString("g")});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::Each(::testing::ElementsAre(
MatchesTimestampedStorageGeneration(
::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
::testing::Not(StorageGeneration::Invalid()))),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
tensorstore::Transaction txn(tensorstore::isolated);
std::vector futures{
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()}),
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()})};
txn.CommitAsync().IgnoreFuture();
std::vector results{futures[0].result(), futures[1].result()};
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
TEST_F(RawEncodingTest, ShardIndexTooShort) {
base_kv_store->Write("prefix/0.shard", Bytes({1, 2, 3})).value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Requested byte range \\[0, 16\\) is not valid for value of size 3"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Existing shard has size 3, but expected at least: 16"));
}
TEST_F(RawEncodingTest, ShardIndexInvalidByteRange) {
base_kv_store
->Write("prefix/0.shard",
Bytes({10, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Shard index specified invalid byte range: \\[10, 2\\)"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Shard index specified invalid byte range: \\[10, 2\\)"));
}
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
base_kv_store
->Write("prefix/0.shard",
Bytes({
10, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Byte range .* relative to the end of "
"the shard index \\(16\\) is not valid"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Byte range .* relative to the end of "
"the shard index \\(16\\) is not valid"));
}
TEST_F(RawEncodingTest, MinishardIndexOutOfRange) {
base_kv_store
->Write("prefix/0.shard",
Bytes({0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Requested byte range \\[16, 64\\) is "
"not valid for value of size 16"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Requested byte range .* is not valid for value of size 16"));
}
TEST_F(RawEncodingTest, MinishardIndexInvalidSize) {
base_kv_store
->Write("prefix/0.shard",
Bytes({0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Invalid minishard index length: 1"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing minishard index for minishard 0: "
"Invalid minishard index length: 1"));
}
TEST_F(RawEncodingTest, MinishardIndexByteRangeOverflow) {
base_kv_store
->Write("prefix/0.shard",
Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
24, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error decoding minishard index entry "
"for chunk 10: Byte range .* relative to the end "
"of the shard index \\(16\\) is not valid"));
}
TEST_F(RawEncodingTest, MinishardIndexEntryByteRangeOutOfRange) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
24, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
200, 0, 0, 0, 0, 0, 0, 0,
}))
.value();
EXPECT_THAT(store->Write(GetChunkKey(1), absl::Cord("x")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Invalid existing byte range for chunk 10: "
"Requested byte range .* is not valid for value of size .*"));
}
TEST_F(RawEncodingTest, MinishardIndexWithDuplicateChunkId) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
48, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
}))
.value();
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Chunk 10 occurs more than once in the minishard "
"index for minishard 0"));
}
class GzipEncodingTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 0},
{"shard_bits", 0},
{"data_encoding", "gzip"},
{"minishard_index_encoding", "gzip"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
};
TEST_F(GzipEncodingTest, CorruptMinishardGzipEncoding) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error decoding zlib-compressed data"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing minishard index for minishard 0: "
"Error decoding zlib-compressed data"));
}
TEST_F(GzipEncodingTest, CorruptDataGzipEncoding) {
absl::Cord shard_data("abc");
zlib::Options zlib_options;
zlib_options.use_gzip_header = true;
zlib::Encode(Bytes({
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
}),
&shard_data, zlib_options);
const unsigned char n = static_cast<unsigned char>(shard_data.size());
absl::Cord temp = Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
n, 0, 0, 0, 0, 0, 0, 0,
});
temp.Append(shard_data);
TENSORSTORE_ASSERT_OK(base_kv_store->Write("prefix/0.shard", temp));
EXPECT_THAT(store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error decoding zlib-compressed data"));
}
class UnderlyingKeyValueStoreTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr GetStore(
tensorstore::neuroglancer_uint64_sharded::GetMaxChunksPerShardFunction
get_max_chunks_per_shard = {}) {
return GetShardedKeyValueStore(
mock_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool), std::move(get_max_chunks_per_shard));
}
kvstore::DriverPtr store = GetStore();
};
TEST_F(UnderlyingKeyValueStoreTest, Read) {
absl::Time init_time = UniqueNow();
absl::Time minishard_index_time;
{
auto future = store->Read(GetChunkKey(0x50), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
req.promise.SetResult(ReadResult::Value(
Bytes({
5, 0, 0, 0, 0, 0, 0, 0,
31, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(37, 63), req.options.byte_range);
minishard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Value(
Bytes({
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), minishard_index_time}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult::Value(Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(GetChunkKey(0x60), o |
608 | cpp | google/tensorstore | murmurhash3 | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3_test.cc | #ifndef TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_MURMURHASH3_H_
#define TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_MURMURHASH3_H_
#include <cstdint>
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
void MurmurHash3_x86_128Hash64Bits(uint64_t input, uint32_t h[4]);
}
}
#endif
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
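// 32-bit finalization mix ("fmix32") from MurmurHash3: the avalanche step
// that forces every input bit to affect every output bit.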
constexpr uint32_t MurmurHash3_x86_128Mix(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
constexpr uint32_t RotateLeft(uint32_t x, int r) {
return (x << r) | (x >> (32 - r));
}
}
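// Specialization of MurmurHash3_x86_128 for an 8-byte message. `h` supplies
// the four 32-bit seed lanes on entry and receives the 128-bit digest on
// return. Illustrative use (mirroring how HashChunkId consumes the result):
//   uint32_t h[4] = {0, 0, 0, 0};
//   MurmurHash3_x86_128Hash64Bits(key, h);
//   uint64_t hash64 = (static_cast<uint64_t>(h[1]) << 32) | h[0];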
void MurmurHash3_x86_128Hash64Bits(uint64_t input, uint32_t h[4]) {
  uint32_t h1 = h[0], h2 = h[1], h3 = h[2], h4 = h[3];
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t low = static_cast<uint32_t>(input);
const uint32_t high = input >> 32;
uint32_t k2 = high * c2;
k2 = RotateLeft(k2, 16);
k2 *= c3;
h2 ^= k2;
uint32_t k1 = low * c1;
k1 = RotateLeft(k1, 15);
k1 *= c2;
h1 ^= k1;
const uint32_t len = 8;
h1 ^= len;
h2 ^= len;
h3 ^= len;
h4 ^= len;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h1 = MurmurHash3_x86_128Mix(h1);
h2 = MurmurHash3_x86_128Mix(h2);
h3 = MurmurHash3_x86_128Mix(h3);
h4 = MurmurHash3_x86_128Mix(h4);
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h[0] = h1;
h[1] = h2;
h[2] = h3;
h[3] = h4;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::neuroglancer_uint64_sharded::MurmurHash3_x86_128Hash64Bits;
TEST(MurmurHash3Test, Basic) {
uint32_t h[4];
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000e028ae41, 0x000000004772b084,
0x000000004772b084, 0x000000004772b084));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005ad58a7e, 0x0000000054337108,
0x0000000054337108, 0x0000000054337108));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000064010da2, 0x0000000062e8bc17,
0x0000000062e8bc17, 0x0000000062e8bc17));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000016d4ce9a, 0x00000000e8bd67d6,
0x00000000e8bd67d6, 0x00000000e8bd67d6));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000004b7ab8c6, 0x00000000eb555955,
0x00000000eb555955, 0x00000000eb555955));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000eb2301be, 0x0000000048e12494,
0x0000000048e12494, 0x0000000048e12494));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005119f47a, 0x00000000c20b94f9,
0x00000000c20b94f9, 0x00000000c20b94f9));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000d6b51bca, 0x00000000a25ad86b,
0x00000000a25ad86b, 0x00000000a25ad86b));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000002d83d9c7, 0x00000000082115eb,
0x00000000082115eb, 0x00000000082115eb));
}
} |
609 | cpp | google/tensorstore | uint64_sharded_encoder | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder_test.cc | #ifndef TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_ENCODER_H_
#define TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_ENCODER_H_
#include <stdint.h>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
absl::Cord EncodeMinishardIndex(
span<const MinishardIndexEntry> minishard_index);
absl::Cord EncodeShardIndex(span<const ShardIndexEntry> shard_index);
class ShardEncoder {
public:
using WriteFunction = std::function<absl::Status(const absl::Cord& buffer)>;
explicit ShardEncoder(const ShardingSpec& sharding_spec,
WriteFunction write_function);
explicit ShardEncoder(const ShardingSpec& sharding_spec, absl::Cord& out);
absl::Status WriteIndexedEntry(uint64_t minishard, ChunkId chunk_id,
const absl::Cord& data, bool compress);
Result<ByteRange> WriteUnindexedEntry(uint64_t minishard,
const absl::Cord& data, bool compress);
Result<absl::Cord> Finalize();
const ShardingSpec& sharding_spec() const { return sharding_spec_; }
~ShardEncoder();
private:
absl::Status FinalizeMinishard();
ShardingSpec sharding_spec_;
WriteFunction write_function_;
std::vector<MinishardIndexEntry> minishard_index_;
std::vector<ShardIndexEntry> shard_index_;
uint64_t cur_minishard_;
int64_t data_file_offset_;
};
std::optional<absl::Cord> EncodeShard(const ShardingSpec& spec,
span<const EncodedChunk> chunks);
absl::Cord EncodeData(const absl::Cord& input,
ShardingSpec::DataEncoding encoding);
}
}
#endif
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/internal/endian.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
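// Serializes a minishard index as three contiguous arrays of n little-endian
// uint64 values: (1) chunk IDs, delta-encoded against the previous entry,
// (2) start offsets, delta-encoded against the previous entry's end offset,
// and (3) byte-range lengths.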
absl::Cord EncodeMinishardIndex(
span<const MinishardIndexEntry> minishard_index) {
internal::FlatCordBuilder builder(minishard_index.size() * 24);
ChunkId prev_chunk_id{0};
int64_t prev_offset = 0;
for (ptrdiff_t i = 0; i < minishard_index.size(); ++i) {
const auto& e = minishard_index[i];
absl::little_endian::Store64(builder.data() + i * 8,
e.chunk_id.value - prev_chunk_id.value);
absl::little_endian::Store64(
builder.data() + minishard_index.size() * 8 + i * 8,
e.byte_range.inclusive_min - prev_offset);
absl::little_endian::Store64(
builder.data() + minishard_index.size() * 16 + i * 8,
e.byte_range.exclusive_max - e.byte_range.inclusive_min);
prev_chunk_id = e.chunk_id;
prev_offset = e.byte_range.exclusive_max;
}
return std::move(builder).Build();
}
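// Serializes the shard index: one 16-byte entry per minishard holding the
// little-endian [start, end) byte range of that minishard's index, relative
// to the end of the shard index itself.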
absl::Cord EncodeShardIndex(span<const ShardIndexEntry> shard_index) {
internal::FlatCordBuilder builder(shard_index.size() * 16);
for (ptrdiff_t i = 0; i < shard_index.size(); ++i) {
const auto& e = shard_index[i];
absl::little_endian::Store64(builder.data() + i * 16, e.inclusive_min);
absl::little_endian::Store64(builder.data() + i * 16 + 8, e.exclusive_max);
}
return std::move(builder).Build();
}
ShardEncoder::ShardEncoder(const ShardingSpec& sharding_spec,
WriteFunction write_function)
: sharding_spec_(sharding_spec),
write_function_(std::move(write_function)),
shard_index_(static_cast<size_t>(1) << sharding_spec_.minishard_bits),
cur_minishard_(0),
data_file_offset_(0) {}
ShardEncoder::ShardEncoder(const ShardingSpec& sharding_spec, absl::Cord& out)
: ShardEncoder(sharding_spec, [&out](const absl::Cord& buffer) {
out.Append(buffer);
return absl::OkStatus();
}) {}
namespace {
Result<int64_t> EncodeData(
const absl::Cord& input, ShardingSpec::DataEncoding encoding,
absl::FunctionRef<absl::Status(const absl::Cord& buffer)> write_function) {
auto encoded = EncodeData(input, encoding);
if (auto status = write_function(encoded); status.ok()) {
return encoded.size();
} else {
return status;
}
}
}
absl::Status ShardEncoder::FinalizeMinishard() {
if (minishard_index_.empty()) return absl::OkStatus();
auto uncompressed_minishard_index = EncodeMinishardIndex(minishard_index_);
TENSORSTORE_ASSIGN_OR_RETURN(
auto num_bytes,
EncodeData(uncompressed_minishard_index,
sharding_spec_.minishard_index_encoding, write_function_));
shard_index_[cur_minishard_] = {data_file_offset_,
data_file_offset_ + num_bytes};
data_file_offset_ += num_bytes;
minishard_index_.clear();
return absl::OkStatus();
}
Result<absl::Cord> ShardEncoder::Finalize() {
TENSORSTORE_RETURN_IF_ERROR(FinalizeMinishard());
return EncodeShardIndex(shard_index_);
}
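// Appends the (optionally compressed) chunk data without recording a
// minishard index entry. Minishards must be written in non-decreasing order;
// advancing to a new minishard finalizes the index of the previous one.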
Result<ByteRange> ShardEncoder::WriteUnindexedEntry(std::uint64_t minishard,
const absl::Cord& data,
bool compress) {
if (minishard != cur_minishard_) {
if (minishard < cur_minishard_) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Minishard ", minishard,
" cannot be written after ", cur_minishard_));
}
TENSORSTORE_RETURN_IF_ERROR(FinalizeMinishard());
cur_minishard_ = minishard;
}
std::string output;
auto start_offset = data_file_offset_;
TENSORSTORE_ASSIGN_OR_RETURN(
auto num_bytes, EncodeData(data,
compress ? sharding_spec_.data_encoding
: ShardingSpec::DataEncoding::raw,
write_function_));
data_file_offset_ += num_bytes;
return ByteRange{start_offset, data_file_offset_};
}
absl::Status ShardEncoder::WriteIndexedEntry(uint64_t minishard,
ChunkId chunk_id,
const absl::Cord& data,
bool compress) {
TENSORSTORE_ASSIGN_OR_RETURN(auto byte_range,
WriteUnindexedEntry(minishard, data, compress));
minishard_index_.push_back({chunk_id, byte_range});
return absl::OkStatus();
}
ShardEncoder::~ShardEncoder() = default;
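// Assembles a complete shard from already-encoded chunks, which must be
// ordered by minishard (non-decreasing). Returns std::nullopt when there are
// no chunks; otherwise the result is the shard index followed by the chunk
// data and minishard indexes.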
std::optional<absl::Cord> EncodeShard(const ShardingSpec& spec,
span<const EncodedChunk> chunks) {
absl::Cord shard_data;
ShardEncoder encoder(spec, shard_data);
for (const auto& chunk : chunks) {
TENSORSTORE_CHECK_OK(
encoder.WriteIndexedEntry(chunk.minishard_and_chunk_id.minishard,
chunk.minishard_and_chunk_id.chunk_id,
chunk.encoded_data, false));
}
auto shard_index = encoder.Finalize().value();
if (shard_data.empty()) return std::nullopt;
shard_index.Append(shard_data);
return shard_index;
}
absl::Cord EncodeData(const absl::Cord& input,
ShardingSpec::DataEncoding encoding) {
if (encoding == ShardingSpec::DataEncoding::raw) {
return input;
}
absl::Cord compressed;
zlib::Options options;
options.level = 9;
options.use_gzip_header = true;
zlib::Encode(input, &compressed, options);
return compressed;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace zlib = tensorstore::zlib;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeShardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardEncoder;
using ::tensorstore::neuroglancer_uint64_sharded::ShardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
absl::Cord Bytes(std::vector<unsigned char> bytes) {
return absl::Cord(std::string_view(
reinterpret_cast<const char*>(bytes.data()), bytes.size()));
}
TEST(EncodeMinishardIndexTest, Empty) {
auto out = EncodeMinishardIndex({});
EXPECT_EQ("", out);
}
TEST(EncodeMinishardIndexTest, SingleEntry) {
auto out = EncodeMinishardIndex(
std::vector<MinishardIndexEntry>{{{0x0123456789abcdef}, {0x11, 0x23}}});
EXPECT_THAT(out, Bytes({
0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01,
0x11, 0, 0, 0, 0, 0, 0, 0,
0x12, 0, 0, 0, 0, 0, 0, 0,
}));
}
TEST(EncodeMinishardIndexTest, MultipleEntries) {
auto out = EncodeMinishardIndex(std::vector<MinishardIndexEntry>{
{{1}, {3, 10}},
{{7}, {12, 15}},
});
EXPECT_THAT(out, Bytes({
1, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0,
7, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
}));
}
TEST(EncodeShardIndexTest, Basic) {
std::vector<ShardIndexEntry> shard_index{{1, 5}, {7, 10}};
auto out = EncodeShardIndex(shard_index);
EXPECT_THAT(out, Bytes({
1, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
7, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
}));
}
TEST(ShardEncoderTest, Raw) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 0},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
absl::Cord encoded_shard_data;
ShardEncoder shard_encoder(sharding_spec, encoded_shard_data);
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {2},
Bytes({1, 2, 3, 4}),
false));
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {8},
Bytes({6, 7, 8}),
false));
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(1, {3}, Bytes({9, 10}),
false));
auto encoded_shard_index = shard_encoder.Finalize().value();
EXPECT_THAT(encoded_shard_data,
Bytes({
1, 2, 3, 4,
6, 7, 8,
2, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
9, 10,
3, 0, 0, 0, 0, 0, 0, 0,
55, 0, 0, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0,
}));
EXPECT_THAT(encoded_shard_index,
Bytes({
7, 0, 0, 0, 0, 0, 0, 0,
55, 0, 0, 0, 0, 0, 0, 0,
57, 0, 0, 0, 0, 0, 0, 0,
81, 0, 0, 0, 0, 0, 0, 0,
}));
}
TEST(ShardEncoderTest, Gzip) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 0},
{"data_encoding", "gzip"},
{"minishard_index_encoding", "gzip"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
absl::Cord encoded_shard_data;
ShardEncoder shard_encoder(sharding_spec, encoded_shard_data);
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {2},
Bytes({1, 2, 3, 4}),
true));
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(0, {8},
Bytes({6, 7, 8}),
true));
TENSORSTORE_ASSERT_OK(shard_encoder.WriteIndexedEntry(1, {3}, Bytes({9, 10}),
false));
absl::Cord encoded_shard_index = shard_encoder.Finalize().value();
absl::Cord expected_shard_data;
zlib::Options options{9, true};
std::vector<ShardIndexEntry> shard_index(2);
{
std::vector<MinishardIndexEntry> minishard_index(2);
minishard_index[0].chunk_id = {2};
minishard_index[0].byte_range.inclusive_min = expected_shard_data.size();
zlib::Encode(Bytes({1, 2, 3, 4}), &expected_shard_data, options);
minishard_index[0].byte_range.exclusive_max = expected_shard_data.size();
minishard_index[1].chunk_id = {8};
minishard_index[1].byte_range.inclusive_min = expected_shard_data.size();
zlib::Encode(Bytes({6, 7, 8}), &expected_shard_data, options);
minishard_index[1].byte_range.exclusive_max = expected_shard_data.size();
shard_index[0].inclusive_min = expected_shard_data.size();
zlib::Encode(EncodeMinishardIndex(minishard_index), &expected_shard_data,
options);
shard_index[0].exclusive_max = expected_shard_data.size();
}
{
std::vector<MinishardIndexEntry> minishard_index(1);
minishard_index[0].chunk_id = {3};
minishard_index[0].byte_range.inclusive_min = expected_shard_data.size();
expected_shard_data.Append(Bytes({9, 10}));
minishard_index[0].byte_range.exclusive_max = expected_shard_data.size();
shard_index[1].inclusive_min = expected_shard_data.size();
zlib::Encode(EncodeMinishardIndex(minishard_index), &expected_shard_data,
options);
shard_index[1].exclusive_max = expected_shard_data.size();
}
auto expected_shard_index = EncodeShardIndex(shard_index);
EXPECT_EQ(expected_shard_data, encoded_shard_data);
EXPECT_EQ(expected_shard_index, encoded_shard_index);
}
} |
610 | cpp | google/tensorstore | uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_test.cc | #ifndef TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_H_
#define TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_H_
#include <ostream>
#include <string>
#include <string_view>
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
class ShardingSpec {
public:
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardingSpec,
internal_json_binding::NoOptions,
tensorstore::IncludeDefaults)
enum class HashFunction {
identity,
murmurhash3_x86_128,
};
friend std::ostream& operator<<(std::ostream& os, HashFunction x);
friend void to_json(::nlohmann::json& out,
HashFunction x);
enum class DataEncoding {
raw,
gzip,
};
friend std::ostream& operator<<(std::ostream& os, DataEncoding x);
friend std::ostream& operator<<(std::ostream& os, const ShardingSpec& x);
ShardingSpec() = default;
ShardingSpec(HashFunction hash_function, int preshift_bits,
int minishard_bits, int shard_bits, DataEncoding data_encoding,
DataEncoding minishard_index_encoding)
: hash_function(hash_function),
preshift_bits(preshift_bits),
minishard_bits(minishard_bits),
shard_bits(shard_bits),
data_encoding(data_encoding),
minishard_index_encoding(minishard_index_encoding) {}
HashFunction hash_function;
int preshift_bits;
int minishard_bits;
int shard_bits;
DataEncoding data_encoding = DataEncoding::raw;
DataEncoding minishard_index_encoding = DataEncoding::raw;
uint64_t num_shards() const { return static_cast<uint64_t>(1) << shard_bits; }
uint64_t num_minishards() const {
return static_cast<uint64_t>(1) << minishard_bits;
}
friend bool operator==(const ShardingSpec& a, const ShardingSpec& b);
friend bool operator!=(const ShardingSpec& a, const ShardingSpec& b) {
return !(a == b);
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.hash_function, x.preshift_bits, x.minishard_bits, x.shard_bits,
x.data_encoding, x.minishard_index_encoding);
};
};
TENSORSTORE_DECLARE_JSON_BINDER(DataEncodingJsonBinder,
ShardingSpec::DataEncoding,
internal_json_binding::NoOptions,
internal_json_binding::NoOptions)
std::string GetShardKey(const ShardingSpec& sharding_spec,
std::string_view prefix, uint64_t shard_number);
struct ChunkId {
uint64_t value;
friend bool operator==(ChunkId a, ChunkId b) { return a.value == b.value; }
friend bool operator!=(ChunkId a, ChunkId b) { return !(a == b); }
friend bool operator<(ChunkId a, ChunkId b) { return a.value < b.value; }
template <typename H>
friend H AbslHashValue(H h, ChunkId x) {
return H::combine(std::move(h), x.value);
}
};
uint64_t HashChunkId(ShardingSpec::HashFunction h, uint64_t key);
struct ChunkCombinedShardInfo {
uint64_t shard_and_minishard;
};
struct ChunkSplitShardInfo {
uint64_t minishard;
uint64_t shard;
};
ChunkCombinedShardInfo GetChunkShardInfo(const ShardingSpec& sharding_spec,
ChunkId chunk_id);
ChunkSplitShardInfo GetSplitShardInfo(const ShardingSpec& sharding_spec,
ChunkCombinedShardInfo combined_info);
ChunkCombinedShardInfo GetCombinedShardInfo(const ShardingSpec& sharding_spec,
ChunkSplitShardInfo split_info);
struct MinishardIndexEntry {
ChunkId chunk_id;
ByteRange byte_range;
friend bool operator==(const MinishardIndexEntry& a,
const MinishardIndexEntry& b) {
return a.chunk_id.value == b.chunk_id.value && a.byte_range == b.byte_range;
}
friend bool operator!=(const MinishardIndexEntry& a,
const MinishardIndexEntry& b) {
return !(a == b);
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.chunk_id, x.byte_range);
};
};
using ShardIndexEntry = ByteRange;
int64_t ShardIndexSize(const ShardingSpec& sharding_spec);
Result<ByteRange> GetAbsoluteShardByteRange(ByteRange relative_range,
const ShardingSpec& sharding_spec);
struct MinishardAndChunkId {
uint64_t minishard;
ChunkId chunk_id;
friend bool operator<(const MinishardAndChunkId& a,
const MinishardAndChunkId& b) {
return (a.minishard < b.minishard) ||
(a.minishard == b.minishard && a.chunk_id.value < b.chunk_id.value);
}
friend bool operator==(const MinishardAndChunkId& a,
const MinishardAndChunkId& b) {
return a.minishard == b.minishard && a.chunk_id.value == b.chunk_id.value;
}
friend bool operator!=(const MinishardAndChunkId& a,
const MinishardAndChunkId& b) {
return !(a == b);
}
};
struct EncodedChunk {
MinishardAndChunkId minishard_and_chunk_id;
absl::Cord encoded_data;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.minishard_and_chunk_id, x.encoded_data);
};
};
using EncodedChunks = std::vector<EncodedChunk>;
const EncodedChunk* FindChunk(span<const EncodedChunk> chunks,
MinishardAndChunkId minishard_and_chunk_id);
}
}
#endif
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <algorithm>
#include "absl/base/optimization.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
namespace jb = tensorstore::internal_json_binding;
constexpr auto HashFunctionBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) {
using HashFunction = ShardingSpec::HashFunction;
return jb::Enum<HashFunction, const char*>({
{HashFunction::identity, "identity"},
{HashFunction::murmurhash3_x86_128, "murmurhash3_x86_128"},
})(is_loading, options, obj, j);
};
constexpr auto DefaultableDataEncodingJsonBinder =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
using DataEncoding = ShardingSpec::DataEncoding;
return jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](auto* v) { *v = DataEncoding::raw; }, DataEncodingJsonBinder)(
is_loading, options, obj, j);
};
}
TENSORSTORE_DEFINE_JSON_BINDER(
DataEncodingJsonBinder, jb::Enum<ShardingSpec::DataEncoding, const char*>({
{ShardingSpec::DataEncoding::raw, "raw"},
{ShardingSpec::DataEncoding::gzip, "gzip"},
}))
std::ostream& operator<<(std::ostream& os, ShardingSpec::HashFunction x) {
return os << jb::ToJson(x, HashFunctionBinder).value();
}
void to_json(::nlohmann::json& out,
ShardingSpec::HashFunction x) {
out = jb::ToJson(x, HashFunctionBinder).value();
}
std::ostream& operator<<(std::ostream& os, ShardingSpec::DataEncoding x) {
return os << jb::ToJson(x, DataEncodingJsonBinder).value();
}
std::ostream& operator<<(std::ostream& os, const ShardingSpec& x) {
return os << jb::ToJson(x).value();
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ShardingSpec, [](auto is_loading,
const auto& options,
auto* obj, auto* j) {
return jb::Object(
jb::Member("@type",
jb::Constant([] { return "neuroglancer_uint64_sharded_v1"; })),
jb::Member("preshift_bits", jb::Projection(&ShardingSpec::preshift_bits,
jb::Integer<int>(0, 64))),
jb::Member("minishard_bits", jb::Projection(&ShardingSpec::minishard_bits,
jb::Integer<int>(0, 32))),
jb::Member("shard_bits",
jb::Dependent([](auto is_loading, const auto& options,
auto* obj, auto* j) {
return jb::Projection(
&ShardingSpec::shard_bits,
jb::Integer<int>(0, 64 - obj->minishard_bits));
})),
jb::Member("hash", jb::Projection(&ShardingSpec::hash_function,
HashFunctionBinder)),
jb::Member("data_encoding",
jb::Projection(&ShardingSpec::data_encoding,
DefaultableDataEncodingJsonBinder)),
jb::Member("minishard_index_encoding",
jb::Projection(&ShardingSpec::minishard_index_encoding,
DefaultableDataEncodingJsonBinder)))(
is_loading, options, obj, j);
})
bool operator==(const ShardingSpec& a, const ShardingSpec& b) {
return a.hash_function == b.hash_function &&
a.preshift_bits == b.preshift_bits &&
a.minishard_bits == b.minishard_bits && a.shard_bits == b.shard_bits &&
a.data_encoding == b.data_encoding &&
a.minishard_index_encoding == b.minishard_index_encoding;
}
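// Builds the key "<prefix>/<shard>.shard", where <shard> is the shard number
// in lowercase hex, zero-padded to ceil(shard_bits / 4) digits (e.g. shard 7
// with shard_bits=3 yields "prefix/7.shard").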
std::string GetShardKey(const ShardingSpec& sharding_spec,
std::string_view prefix, uint64_t shard_number) {
return internal::JoinPath(
prefix,
absl::StrFormat("%0*x.shard", CeilOfRatio(sharding_spec.shard_bits, 4),
shard_number));
}
namespace {
constexpr uint64_t ShiftRightUpTo64(uint64_t x, int amount) {
if (amount == 64) return 0;
return x >> amount;
}
uint64_t GetLowBitMask(int num_bits) {
if (num_bits == 64) return ~uint64_t(0);
return (uint64_t(1) << num_bits) - 1;
}
}
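// Maps a (pre-shifted) chunk key to its hash. For murmurhash3_x86_128 the
// low 64 bits of the 128-bit digest (computed with a zero seed) are used.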
uint64_t HashChunkId(ShardingSpec::HashFunction h, uint64_t key) {
switch (h) {
case ShardingSpec::HashFunction::identity:
return key;
case ShardingSpec::HashFunction::murmurhash3_x86_128: {
      uint32_t out[4] = {0, 0, 0, 0};
MurmurHash3_x86_128Hash64Bits(key, out);
return (static_cast<uint64_t>(out[1]) << 32) | out[0];
}
}
ABSL_UNREACHABLE();
}
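// Computes the combined shard/minishard number for a chunk: the chunk ID is
// shifted right by preshift_bits, hashed, and the low
// (minishard_bits + shard_bits) bits are kept. GetSplitShardInfo then takes
// the low minishard_bits bits as the minishard and the next shard_bits bits
// as the shard.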
ChunkCombinedShardInfo GetChunkShardInfo(const ShardingSpec& sharding_spec,
ChunkId chunk_id) {
ChunkCombinedShardInfo result;
const uint64_t hash_input =
ShiftRightUpTo64(chunk_id.value, sharding_spec.preshift_bits);
const uint64_t hash_output =
HashChunkId(sharding_spec.hash_function, hash_input);
result.shard_and_minishard =
hash_output &
GetLowBitMask(sharding_spec.minishard_bits + sharding_spec.shard_bits);
return result;
}
ChunkSplitShardInfo GetSplitShardInfo(const ShardingSpec& sharding_spec,
ChunkCombinedShardInfo combined_info) {
ChunkSplitShardInfo result;
result.minishard = combined_info.shard_and_minishard &
GetLowBitMask(sharding_spec.minishard_bits);
result.shard = ShiftRightUpTo64(combined_info.shard_and_minishard,
sharding_spec.minishard_bits) &
GetLowBitMask(sharding_spec.shard_bits);
return result;
}
ChunkCombinedShardInfo GetCombinedShardInfo(const ShardingSpec& sharding_spec,
ChunkSplitShardInfo split_info) {
ChunkCombinedShardInfo result;
result.shard_and_minishard = split_info.minishard;
if (sharding_spec.minishard_bits != 64) {
result.shard_and_minishard |=
(split_info.shard << sharding_spec.minishard_bits);
}
return result;
}
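// The shard index occupies 16 bytes per minishard at the start of each shard
// file; GetAbsoluteShardByteRange shifts a data-relative byte range past that
// index, failing on integer overflow.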
int64_t ShardIndexSize(const ShardingSpec& sharding_spec) {
return static_cast<int64_t>(16) << sharding_spec.minishard_bits;
}
Result<ByteRange> GetAbsoluteShardByteRange(ByteRange relative_range,
const ShardingSpec& sharding_spec) {
const int64_t offset = ShardIndexSize(sharding_spec);
ByteRange result;
if (internal::AddOverflow(relative_range.inclusive_min, offset,
&result.inclusive_min) ||
internal::AddOverflow(relative_range.exclusive_max, offset,
&result.exclusive_max)) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Byte range ", relative_range,
" relative to the end of the shard index (", offset, ") is not valid"));
}
return result;
}
const EncodedChunk* FindChunk(span<const EncodedChunk> chunks,
MinishardAndChunkId minishard_and_chunk_id) {
const auto chunk_it = std::lower_bound(
chunks.begin(), chunks.end(), minishard_and_chunk_id,
[](const auto& chunk, const auto& minishard_and_chunk_id) {
return chunk.minishard_and_chunk_id < minishard_and_chunk_id;
});
if (chunk_it == chunks.end() ||
chunk_it->minishard_and_chunk_id != minishard_and_chunk_id) {
return nullptr;
}
return &*chunk_it;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
TEST(ShardingSpecTest, Comparison) {
ShardingSpec a{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec b{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec c{
ShardingSpec::HashFunction::identity,
2,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec d{
ShardingSpec::HashFunction::identity,
1,
5,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec e{
ShardingSpec::HashFunction::identity,
1,
2,
9,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec f{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::gzip,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec g{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_EQ(e, e);
EXPECT_EQ(f, f);
EXPECT_EQ(g, g);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(a, g);
}
TEST(ShardingSpecTest, ToJson) {
ShardingSpec a{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
EXPECT_EQ(::nlohmann::json({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "raw"},
{"minishard_index_encoding", "gzip"}}),
::nlohmann::json(a));
}
TEST(ShardingSpecTest, Parse) {
for (auto h : {ShardingSpec::HashFunction::identity,
ShardingSpec::HashFunction::murmurhash3_x86_128}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", ::nlohmann::json(h)},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "raw"},
{"minishard_index_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
h,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
}));
}
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
}));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::gzip,
ShardingSpec::DataEncoding::raw,
}));
for (const char* k :
{"@type", "hash", "preshift_bits", "minishard_bits", "shard_bits"}) {
::nlohmann::json j{{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}};
j.erase(k);
EXPECT_THAT(ShardingSpec::FromJson(j),
MatchesStatus(absl::StatusCode::kInvalidArgument));
j[k] = nullptr;
EXPECT_THAT(ShardingSpec::FromJson(j),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v2"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"neuroglancer_uint64_sharded_v2\".*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "invalid_hash"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"invalid_hash\".*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", 1234}}),
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*1234.*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "invalid_encoding"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"invalid_encoding\".*"));
for (int i : {0, 1, 63, 64}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", i},
{"minishard_bits", 2},
{"shard_bits", 3}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
i,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 65, 66}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", i},
{"minishard_bits", 2},
{"shard_bits", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (int i : {0, 1, 31, 32}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", i},
{"shard_bits", 0}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
1,
i,
0,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 33, 34, 35}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", i},
{"shard_bits", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (int i : {0, 1, 64 - 8, 64 - 7}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 7},
{"shard_bits", i}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
1,
7,
i,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 64 - 6, 64 - 5, 65, 66}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 7},
{"shard_bits", i}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
EXPECT_THAT(ShardingSpec::FromJson("invalid"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(MinishardIndexEntryTest, Comparison) {
MinishardIndexEntry a{{1}, {2, 3}};
MinishardIndexEntry b{{1}, {3, 4}};
MinishardIndexEntry c{{2}, {2, 3}};
MinishardIndexEntry d{{2}, {3, 4}};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a == b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, c);
EXPECT_NE(b, d);
EXPECT_NE(c, d);
}
} |
611 | cpp | google/tensorstore | uint64_sharded_decoder | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder_test.cc | #ifndef TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_DECODER_H_
#define TENSORSTORE_KVSTORE_NEUROGLANCER_UINT64_SHARDED_UINT64_SHARDED_DECODER_H_
#include <optional>
#include <string_view>
#include <vector>
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
Result<std::vector<MinishardIndexEntry>> DecodeMinishardIndex(
const absl::Cord& input, ShardingSpec::DataEncoding encoding);
std::optional<ByteRange> FindChunkInMinishard(
span<const MinishardIndexEntry> minishard_index, ChunkId chunk_id);
Result<absl::Cord> DecodeData(const absl::Cord& input,
ShardingSpec::DataEncoding encoding);
Result<ByteRange> DecodeShardIndexEntry(std::string_view input);
Result<std::vector<MinishardIndexEntry>>
DecodeMinishardIndexAndAdjustByteRanges(const absl::Cord& encoded,
const ShardingSpec& sharding_spec);
Result<EncodedChunks> SplitShard(const ShardingSpec& sharding_spec,
const absl::Cord& shard_data);
}
}
#endif
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
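// Decodes a (possibly gzip-compressed) minishard index, reversing the delta
// encoding of chunk IDs and byte offsets produced by EncodeMinishardIndex,
// validating each byte range, and returning the entries sorted by chunk ID.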
Result<std::vector<MinishardIndexEntry>> DecodeMinishardIndex(
const absl::Cord& input, ShardingSpec::DataEncoding encoding) {
absl::Cord decoded_input;
if (encoding != ShardingSpec::DataEncoding::raw) {
TENSORSTORE_ASSIGN_OR_RETURN(decoded_input, DecodeData(input, encoding));
} else {
decoded_input = input;
}
if ((decoded_input.size() % 24) != 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid minishard index length: ", decoded_input.size()));
}
std::vector<MinishardIndexEntry> result(decoded_input.size() / 24);
static_assert(sizeof(MinishardIndexEntry) == 24);
auto decoded_flat = decoded_input.Flatten();
ChunkId chunk_id{0};
uint64_t byte_offset = 0;
for (size_t i = 0; i < result.size(); ++i) {
auto& entry = result[i];
chunk_id.value += absl::little_endian::Load64(decoded_flat.data() + i * 8);
entry.chunk_id = chunk_id;
byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
8 * result.size());
entry.byte_range.inclusive_min = byte_offset;
byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
16 * result.size());
entry.byte_range.exclusive_max = byte_offset;
if (!entry.byte_range.SatisfiesInvariants()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid byte range in minishard index for chunk ",
entry.chunk_id.value, ": ", entry.byte_range));
}
}
absl::c_sort(result,
[](const MinishardIndexEntry& a, const MinishardIndexEntry& b) {
return a.chunk_id.value < b.chunk_id.value;
});
return result;
}
std::optional<ByteRange> FindChunkInMinishard(
span<const MinishardIndexEntry> minishard_index, ChunkId chunk_id) {
auto it =
absl::c_lower_bound(minishard_index, chunk_id,
[](const MinishardIndexEntry& e, ChunkId chunk_id) {
return e.chunk_id.value < chunk_id.value;
});
if (it == minishard_index.end() || it->chunk_id.value != chunk_id.value) {
return std::nullopt;
}
return it->byte_range;
}
Result<absl::Cord> DecodeData(const absl::Cord& input,
ShardingSpec::DataEncoding encoding) {
if (encoding == ShardingSpec::DataEncoding::raw) {
return input;
}
absl::Cord uncompressed;
TENSORSTORE_RETURN_IF_ERROR(
zlib::Decode(input, &uncompressed, true));
return uncompressed;
}
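// Parses one 16-byte shard index entry as a little-endian [start, end) pair
// and verifies that it forms a valid byte range.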
Result<ByteRange> DecodeShardIndexEntry(std::string_view input) {
if (input.size() != 16) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Expected 16 bytes, but received: ", input.size(), " bytes"));
}
ByteRange r;
r.inclusive_min = absl::little_endian::Load64(input.data());
r.exclusive_max = absl::little_endian::Load64(input.data() + 8);
if (!r.SatisfiesInvariants()) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Shard index specified invalid byte range: ", r));
}
return r;
}
Result<std::vector<MinishardIndexEntry>>
DecodeMinishardIndexAndAdjustByteRanges(const absl::Cord& encoded,
const ShardingSpec& sharding_spec) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index,
DecodeMinishardIndex(encoded, sharding_spec.minishard_index_encoding));
for (auto& entry : minishard_index) {
auto result = GetAbsoluteShardByteRange(entry.byte_range, sharding_spec);
if (!result.ok()) {
return MaybeAnnotateStatus(
result.status(),
tensorstore::StrCat("Error decoding minishard index entry for chunk ",
entry.chunk_id.value));
}
entry.byte_range = std::move(result).value();
}
return minishard_index;
}
namespace {
absl::Status SplitMinishard(const ShardingSpec& sharding_spec,
const absl::Cord& shard_data, uint64_t minishard,
span<const MinishardIndexEntry> minishard_index,
std::vector<EncodedChunk>& chunks) {
std::optional<ChunkId> prev_chunk_id;
for (const auto& existing_entry : minishard_index) {
if (prev_chunk_id &&
existing_entry.chunk_id.value == prev_chunk_id->value) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Chunk ", existing_entry.chunk_id.value,
" occurs more than once in the minishard index "
"for minishard ",
minishard));
}
prev_chunk_id = existing_entry.chunk_id;
const auto GetChunkByteRange = [&]() -> Result<ByteRange> {
TENSORSTORE_RETURN_IF_ERROR(
OptionalByteRangeRequest(existing_entry.byte_range)
.Validate(shard_data.size()));
return existing_entry.byte_range;
};
TENSORSTORE_ASSIGN_OR_RETURN(
auto chunk_byte_range, GetChunkByteRange(),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat("Invalid existing byte range for chunk ",
existing_entry.chunk_id.value)));
chunks.push_back(
EncodedChunk{{minishard, existing_entry.chunk_id},
internal::GetSubCord(shard_data, chunk_byte_range)});
}
return absl::OkStatus();
}
}
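// Splits an encoded shard back into its constituent chunks: validates that
// the shard covers the shard index, then for each minishard decodes its
// index and extracts the still-encoded chunk bytes, rejecting duplicate
// chunk IDs and out-of-range byte ranges.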
Result<std::vector<EncodedChunk>> SplitShard(const ShardingSpec& sharding_spec,
const absl::Cord& shard_data) {
std::vector<EncodedChunk> chunks;
if (shard_data.empty()) return chunks;
const uint64_t num_minishards = sharding_spec.num_minishards();
if (shard_data.size() < num_minishards * 16) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Existing shard has size ", shard_data.size(),
", but expected at least: ", num_minishards * 16));
}
std::vector<char> shard_index(16 * num_minishards);
internal::CopyCordToSpan(shard_data, shard_index);
for (uint64_t minishard = 0; minishard < num_minishards; ++minishard) {
const auto GetMinishardIndexByteRange = [&]() -> Result<ByteRange> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index_byte_range,
DecodeShardIndexEntry(
std::string_view(shard_index.data() + 16 * minishard, 16)));
TENSORSTORE_ASSIGN_OR_RETURN(
minishard_index_byte_range,
GetAbsoluteShardByteRange(minishard_index_byte_range, sharding_spec));
TENSORSTORE_RETURN_IF_ERROR(
OptionalByteRangeRequest(minishard_index_byte_range)
.Validate(shard_data.size()));
return minishard_index_byte_range;
};
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_ibr, GetMinishardIndexByteRange(),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Error decoding existing shard index entry for minishard ",
minishard)));
if (minishard_ibr.size() == 0) continue;
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index,
DecodeMinishardIndexAndAdjustByteRanges(
internal::GetSubCord(shard_data, minishard_ibr), sharding_spec),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Error decoding existing minishard index for minishard ",
minishard)));
TENSORSTORE_RETURN_IF_ERROR(SplitMinishard(
sharding_spec, shard_data, minishard, minishard_index, chunks));
}
return chunks;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace zlib = tensorstore::zlib;
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::DecodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
void TestEncodeMinishardRoundTrip(
std::vector<MinishardIndexEntry> minishard_index) {
auto out = EncodeMinishardIndex(minishard_index);
absl::Cord compressed;
zlib::Options options{9, true};
zlib::Encode(out, &compressed, options);
EXPECT_THAT(
DecodeMinishardIndex(out, ShardingSpec::DataEncoding::raw),
::testing::Optional(::testing::ElementsAreArray(minishard_index)));
EXPECT_THAT(
DecodeMinishardIndex(compressed, ShardingSpec::DataEncoding::gzip),
::testing::Optional(::testing::ElementsAreArray(minishard_index)));
}
TEST(DecodeMinishardIndexTest, Empty) {
TestEncodeMinishardRoundTrip({});
}
TEST(DecodeMinishardIndexTest, SingleEntry) {
TestEncodeMinishardRoundTrip({{{0x0123456789abcdef}, {0x11, 0x23}}});
}
TEST(DecodeMinishardIndexTest, MultipleEntries) {
TestEncodeMinishardRoundTrip({
{{1}, {3, 10}},
{{7}, {12, 15}},
});
}
TEST(DecodeMinishardIndexTest, InvalidGzip) {
EXPECT_THAT(
DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::gzip),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error decoding zlib-compressed data"));
}
TEST(DecodeMinishardIndexTest, InvalidSizeRaw) {
EXPECT_THAT(
DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::raw),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid minishard index length: 3"));
}
TEST(DecodeMinishardIndexTest, InvalidSizeGzip) {
absl::Cord temp;
zlib::Options options{9, true};
zlib::Encode(absl::Cord("abc"), &temp, options);
EXPECT_THAT(DecodeMinishardIndex(temp, ShardingSpec::DataEncoding::gzip),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid minishard index length: 3"));
}
TEST(DecodeMinishardIndexTest, InvalidInterval) {
std::vector<MinishardIndexEntry> minishard_index{{{3}, {1, 0}}};
auto encoded = EncodeMinishardIndex(minishard_index);
EXPECT_THAT(
DecodeMinishardIndex(encoded, ShardingSpec::DataEncoding::raw),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid byte range in minishard index for chunk 3: \\[1, 0\\)"));
}
} |
612 | cpp | google/tensorstore | s3_endpoint | tensorstore/kvstore/s3/s3_endpoint.cc | tensorstore/kvstore/s3/s3_endpoint_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_S3_ENDPOINT_H_
#define TENSORSTORE_KVSTORE_S3_S3_ENDPOINT_H_
#include <memory>
#include <string>
#include <string_view>
#include <variant>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
struct S3EndpointRegion {
std::string endpoint;
std::string aws_region;
template <typename Sink>
friend void AbslStringify(Sink& sink, const S3EndpointRegion& ehr) {
absl::Format(&sink, "S3EndpointRegion{endpoint=%s, aws_region=%s}",
ehr.endpoint, ehr.aws_region);
}
friend bool operator==(const S3EndpointRegion& a, const S3EndpointRegion& b) {
return a.endpoint == b.endpoint && a.aws_region == b.aws_region;
}
friend bool operator!=(const S3EndpointRegion& a, const S3EndpointRegion& b) {
return !(a == b);
}
};
std::variant<absl::Status, S3EndpointRegion> ValidateEndpoint(
std::string_view bucket, std::string aws_region, std::string_view endpoint,
std::string host_header);
Future<S3EndpointRegion> ResolveEndpointRegion(
std::string bucket, std::string_view endpoint, std::string host_header,
std::shared_ptr<internal_http::HttpTransport> transport);
}
}
#endif
#include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/s3/validate.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kAmzBucketRegionHeader[] = "x-amz-bucket-region";
struct S3VirtualHostFormatter {
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
    return absl::StrFormat("https://%s.s3.%s.amazonaws.com", bucket,
aws_region);
}
};
struct S3PathFormatter {
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
    return absl::StrFormat("https://s3.%s.amazonaws.com/%s", aws_region,
bucket);
}
};
struct S3CustomFormatter {
std::string endpoint;
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
return absl::StrFormat("%s/%s", endpoint, bucket);
}
};
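// Completion callback linked to the HEAD request issued by
// ResolveEndpointRegion. It prefers the "x-amz-bucket-region" response
// header, then the configured default region, and otherwise fails; since
// Promise::SetResult only takes effect the first time it is called, the
// first matching branch wins.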
template <typename Formatter>
struct ResolveHost {
std::string bucket;
std::string default_aws_region;
Formatter formatter;
void operator()(Promise<S3EndpointRegion> promise,
ReadyFuture<HttpResponse> ready) {
if (!promise.result_needed()) return;
auto& headers = ready.value().headers;
if (auto it = headers.find(kAmzBucketRegionHeader); it != headers.end()) {
promise.SetResult(S3EndpointRegion{
formatter.GetEndpoint(bucket, it->second),
it->second,
});
}
if (!default_aws_region.empty()) {
promise.SetResult(S3EndpointRegion{
formatter.GetEndpoint(bucket, default_aws_region),
default_aws_region,
});
}
promise.SetResult(absl::FailedPreconditionError(tensorstore::StrCat(
"Failed to resolve aws_region for bucket ", QuoteString(bucket))));
}
};
}
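// Validates the bucket/region/endpoint combination. A host_header requires an
// explicit endpoint, and old-style US-East-1 bucket names pin the region to
// "us-east-1". With no endpoint and a known region, a virtual-hosted-style
// URL is used unless the bucket name contains ".", in which case path-style
// addressing is chosen. A custom endpoint must be http(s) without query or
// fragment. When the region is still unknown, OkStatus is returned so the
// caller can resolve it via ResolveEndpointRegion.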
std::variant<absl::Status, S3EndpointRegion> ValidateEndpoint(
std::string_view bucket, std::string aws_region, std::string_view endpoint,
std::string host_header) {
ABSL_CHECK(!bucket.empty());
if (!host_header.empty() && endpoint.empty()) {
return absl::InvalidArgumentError(
"\"host_header\" cannot be set without also setting \"endpoint\"");
}
if (internal_kvstore_s3::ClassifyBucketName(bucket) ==
internal_kvstore_s3::BucketNameType::kOldUSEast1) {
if (!aws_region.empty() && aws_region != "us-east-1") {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Bucket ", QuoteString(bucket),
" requires aws_region \"us-east-1\", not ", QuoteString(aws_region)));
}
aws_region = "us-east-1";
}
if (endpoint.empty()) {
if (!aws_region.empty()) {
if (!absl::StrContains(bucket, ".")) {
S3VirtualHostFormatter formatter;
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
S3PathFormatter formatter;
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
return absl::OkStatus();
}
auto parsed = internal::ParseGenericUri(endpoint);
if (parsed.scheme != "http" && parsed.scheme != "https") {
return absl::InvalidArgumentError(
tensorstore::StrCat("Endpoint ", endpoint, " has invalid scheme ",
parsed.scheme, ". Should be http(s)."));
}
if (!parsed.query.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Query in endpoint unsupported ", endpoint));
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Fragment in endpoint unsupported ", endpoint));
}
if (!aws_region.empty()) {
S3CustomFormatter formatter{std::string(endpoint)};
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
return absl::OkStatus();
}
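// Determines the bucket's region by issuing a HEAD request (virtual-hosted or
// path-style against the default AWS endpoint, or "<endpoint>/<bucket>" for a
// custom endpoint) and reading the "x-amz-bucket-region" response header;
// custom endpoints fall back to "us-east-1" when the header is absent.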
Future<S3EndpointRegion> ResolveEndpointRegion(
std::string bucket, std::string_view endpoint, std::string host_header,
std::shared_ptr<internal_http::HttpTransport> transport) {
assert(!bucket.empty());
assert(transport);
assert(IsValidBucketName(bucket));
if (endpoint.empty()) {
if (!absl::StrContains(bucket, ".")) {
      std::string url = absl::StrFormat("https://%s.s3.amazonaws.com", bucket);
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3VirtualHostFormatter>{
std::move(bucket), {}, S3VirtualHostFormatter{}},
transport->IssueRequest(
HttpRequestBuilder("HEAD", std::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
std::string url =
        absl::StrFormat("https://s3.amazonaws.com/%s", bucket);
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3PathFormatter>{
std::move(bucket), {}, S3PathFormatter{}},
transport->IssueRequest(
HttpRequestBuilder("HEAD", std ::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
std::string url = absl::StrFormat("%s/%s", endpoint, bucket);
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3CustomFormatter>{
std::move(bucket), "us-east-1",
S3CustomFormatter{std::string(endpoint)}},
transport->IssueRequest(HttpRequestBuilder("HEAD", std::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
}
} | #include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::ResolveEndpointRegion;
using ::tensorstore::internal_kvstore_s3::S3EndpointRegion;
using ::tensorstore::internal_kvstore_s3::ValidateEndpoint;
namespace {
TEST(ValidateEndpointTest, Basic) {
EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, {}),
::testing::VariantWith<absl::Status>(absl::OkStatus()));
EXPECT_THAT(ValidateEndpoint("test.bucket", {}, {}, {}),
::testing::VariantWith<absl::Status>(absl::OkStatus()));
EXPECT_THAT(ValidateEndpoint("testbucket", "us-east-1", {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", "us-east-1", {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", {}, {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", "us-west-1", {}, {}),
::testing::VariantWith<absl::Status>(
tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
EXPECT_THAT(ValidateEndpoint("testbucket", "region", "http:
::testing::VariantWith<S3EndpointRegion>(
S3EndpointRegion{"http:
EXPECT_THAT(
ValidateEndpoint("testbucket", "region", "http:
::testing::VariantWith<S3EndpointRegion>(
S3EndpointRegion{"http:
EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, "my.header"),
::testing::VariantWith<absl::Status>(
tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
}
TEST(ResolveEndpointRegion, Basic) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"HEAD http:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
S3EndpointRegion ehr;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr,
ResolveEndpointRegion("testbucket", {}, {}, mock_transport).result());
EXPECT_THAT(ehr.endpoint, "https:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr,
ResolveEndpointRegion("test.bucket", {}, {}, mock_transport).result());
EXPECT_THAT(ehr.endpoint, "https:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr, ResolveEndpointRegion("test.bucket", "http:
mock_transport)
.result());
EXPECT_THAT(ehr.endpoint, "http:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr, ResolveEndpointRegion("test.bucket", "http:
"s3.localhost.com", mock_transport)
.result());
EXPECT_THAT(ehr.endpoint, "http:
EXPECT_THAT(ehr.aws_region, "us-east-1");
}
} |
613 | cpp | google/tensorstore | aws_credentials_resource | tensorstore/kvstore/s3/aws_credentials_resource.cc | tensorstore/kvstore/s3/aws_credentials_resource_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_CREDENTIALS_AWS_CREDENTIALS_RESOURCE_H_
#define TENSORSTORE_KVSTORE_S3_CREDENTIALS_AWS_CREDENTIALS_RESOURCE_H_
#include <stddef.h>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
struct AwsCredentialsResource
: public internal::ContextResourceTraits<AwsCredentialsResource> {
static constexpr char id[] = "aws_credentials";
struct Spec {
std::string profile;
std::string filename;
std::string metadata_endpoint;
bool anonymous = false;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.profile, x.filename, x.metadata_endpoint, x.anonymous);
};
};
struct Resource {
Spec spec;
std::shared_ptr<AwsCredentialProvider> credential_provider_;
Result<std::optional<AwsCredentials>> GetCredentials();
};
static Spec Default() { return Spec{}; }
static constexpr auto JsonBinder() {
return [](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
return AwsCredentialsResource::FromJsonImpl(options, obj, j);
} else {
return AwsCredentialsResource::ToJsonImpl(options, obj, j);
}
};
}
Result<Resource> Create(
const Spec& spec, internal::ContextResourceCreationContext context) const;
Spec GetSpec(const Resource& resource,
const internal::ContextSpecBuilder& builder) const {
return resource.spec;
}
private:
static absl::Status FromJsonImpl(const JsonSerializationOptions& options,
Spec* spec, ::nlohmann::json* j);
static absl::Status ToJsonImpl(const JsonSerializationOptions& options,
const Spec* spec, ::nlohmann::json* j);
};
}
}
#endif
#include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <stddef.h>
#include <cassert>
#include <memory>
#include <optional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
using Spec = ::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Spec;
using Resource =
::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Resource;
const internal::ContextResourceRegistration<AwsCredentialsResource>
aws_credentials_registration;
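// Builds the credential provider for this resource. Anonymous specs and a
// NotFound result from GetAwsCredentialProvider both produce a Resource with
// a null provider, which GetCredentials() later reports as std::nullopt.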
Result<Resource> AwsCredentialsResource::Create(
const Spec& spec, internal::ContextResourceCreationContext context) const {
if (spec.anonymous) {
return Resource{spec, nullptr};
}
auto result = GetAwsCredentialProvider(
spec.filename, spec.profile, spec.metadata_endpoint,
internal_http::GetDefaultHttpTransport());
if (!result.ok() && absl::IsNotFound(result.status())) {
return Resource{spec, nullptr};
}
TENSORSTORE_RETURN_IF_ERROR(result);
return Resource{spec, std::move(*result)};
}
Result<std::optional<AwsCredentials>>
AwsCredentialsResource::Resource::GetCredentials() {
if (!credential_provider_) return std::nullopt;
auto credential_result_ = credential_provider_->GetCredentials();
if (!credential_result_.ok() &&
absl::IsNotFound(credential_result_.status())) {
return std::nullopt;
}
return credential_result_;
}
namespace {
static constexpr auto kAnonymousBinder = jb::Object(jb::Member(
"anonymous", jb::Projection<&Spec::anonymous>(
jb::Validate([](const auto& options, bool* x) {
if (*x != true) {
return absl::InvalidArgumentError(
"\"anonymous\" must be true or not present in "
"\"aws_credentials\"");
}
return absl::OkStatus();
}))));
static constexpr auto kParameterBinder = jb::Object(
jb::OptionalMember("profile", jb::Projection<&Spec::profile>()),
jb::OptionalMember("filename", jb::Projection<&Spec::filename>()),
jb::OptionalMember("metadata_endpoint",
jb::Projection<&Spec::metadata_endpoint>()));
}
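// Parses the JSON spec, dispatching to the anonymous binder when an
// "anonymous" member is present and to the
// profile/filename/metadata_endpoint binder otherwise.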
absl::Status AwsCredentialsResource::FromJsonImpl(
const JsonSerializationOptions& options, Spec* spec, ::nlohmann::json* j) {
if (auto* j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
j_obj && j_obj->find("anonymous") != j_obj->end()) {
return kAnonymousBinder(std::true_type{}, options, spec, j);
}
return kParameterBinder(std::true_type{}, options, spec, j);
}
absl::Status AwsCredentialsResource::ToJsonImpl(
const JsonSerializationOptions& options, const Spec* spec,
::nlohmann::json* j) {
if (spec->anonymous) {
return kAnonymousBinder(std::false_type{}, options, spec, j);
}
return kParameterBinder(std::false_type{}, options, spec, j);
}
}
} | #include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvstore_s3::AwsCredentialsResource;
namespace {
TEST(AwsCredentialsResourceTest, InvalidDirectSpec) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected non-null value, but received: null"));
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: 3"));
EXPECT_THAT(
Context::Resource<AwsCredentialsResource>::FromJson("anonymous"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid reference to \"aws_credentials\" resource: \"anonymous\""));
}
TEST(AwsCredentialsResourceTest, Default) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<AwsCredentialsResource>::FromJson("aws_credentials"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ExplicitDefault) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
::nlohmann::json::object_t()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"profile", "my_profile"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, "my_profile");
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidAnonymousSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"anonymous", true}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, true);
EXPECT_THAT(resource->GetCredentials(),
tensorstore::IsOkAndHolds(::testing::Eq(std::nullopt)));
}
TEST(AwsCredentialsResourceTest, InvalidSpecs) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson({
{"anonymous", true},
{"profile", "xyz"},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
614 | cpp | google/tensorstore | s3_request_builder | tensorstore/kvstore/s3/s3_request_builder.cc | tensorstore/kvstore/s3/s3_request_builder_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_REQUEST_BUILDER_H_
#define TENSORSTORE_KVSTORE_S3_REQUEST_BUILDER_H_
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_uri_utils.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
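// Builds S3 HTTP requests signed with AWS Signature Version 4.
// Illustrative usage only; the surrounding variable names are hypothetical:
//
//   auto request = S3RequestBuilder("GET", url)
//                      .MaybeAddRangeHeader(byte_range)
//                      .BuildRequest(host_header, credentials, aws_region,
//                                    payload_sha256_hash, absl::Now());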
class S3RequestBuilder {
public:
S3RequestBuilder(std::string_view method, std::string endpoint_url)
: builder_(method, std::move(endpoint_url), S3UriEncode) {}
S3RequestBuilder& AddHeader(std::string_view header) {
builder_.AddHeader(header);
return *this;
}
S3RequestBuilder& AddQueryParameter(std::string key, std::string value) {
query_params_.push_back({std::move(key), std::move(value)});
return *this;
}
S3RequestBuilder& EnableAcceptEncoding() {
builder_.EnableAcceptEncoding();
return *this;
}
S3RequestBuilder& MaybeAddRequesterPayer(bool requester_payer = false);
S3RequestBuilder& MaybeAddRangeHeader(OptionalByteRangeRequest byte_range) {
builder_.MaybeAddRangeHeader(byte_range);
return *this;
}
S3RequestBuilder& MaybeAddCacheControlMaxAgeHeader(absl::Duration max_age) {
builder_.MaybeAddCacheControlMaxAgeHeader(max_age);
return *this;
}
S3RequestBuilder& MaybeAddStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
builder_.MaybeAddStalenessBoundCacheControlHeader(staleness_bound);
return *this;
}
const std::string& GetCanonicalRequest() const { return canonical_request_; }
const std::string& GetSigningString() const { return signing_string_; }
const std::string& GetSignature() const { return signature_; }
internal_http::HttpRequest BuildRequest(std::string_view host_header,
const AwsCredentials& credentials,
std::string_view aws_region,
std::string_view payload_sha256_hash,
const absl::Time& time);
private:
std::string canonical_request_;
std::string signing_string_;
std::string signature_;
std::vector<std::pair<std::string, std::string>> query_params_;
internal_http::HttpRequestBuilder builder_;
};
}
}
#endif
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include "tensorstore/kvstore/s3/s3_request_builder.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/time/time.h"
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include "tensorstore/internal/digest/sha256.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_uri_utils.h"
using ::tensorstore::internal::ParseGenericUri;
using ::tensorstore::internal::SHA256Digester;
using ::tensorstore::internal_http::HttpRequest;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
constexpr static size_t kHmacSize = 32;
void ComputeHmac(std::string_view key, std::string_view message,
unsigned char (&hmac)[kHmacSize]) {
unsigned int md_len = kHmacSize;
ABSL_CHECK(
HMAC(EVP_sha256(), reinterpret_cast<const unsigned char*>(key.data()),
key.size(), reinterpret_cast<const unsigned char*>(message.data()),
message.size(), hmac, &md_len) &&
md_len == kHmacSize);
}
void ComputeHmac(unsigned char (&key)[kHmacSize], std::string_view message,
unsigned char (&hmac)[kHmacSize]) {
unsigned int md_len = kHmacSize;
ABSL_CHECK(HMAC(EVP_sha256(), key, kHmacSize,
reinterpret_cast<const unsigned char*>(message.data()),
message.size(), hmac, &md_len) &&
md_len == kHmacSize);
}
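// Assembles the SigV4 canonical request: method, URI-encoded path, query
// string, each signed header as "name:value", the semicolon-joined list of
// signed header names, and the payload hash, separated by newlines.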
std::string CanonicalRequest(
std::string_view method, std::string_view path, std::string_view query,
std::string_view payload_hash,
const std::vector<std::pair<std::string, std::string_view>>& headers) {
std::string canonical =
absl::StrCat(method, "\n", S3UriObjectKeyEncode(path), "\n", query, "\n");
std::vector<std::string_view> signed_headers;
signed_headers.reserve(headers.size());
for (auto& pair : headers) {
absl::StrAppend(&canonical, pair.first, ":", pair.second, "\n");
signed_headers.push_back(pair.first);
}
absl::StrAppend(&canonical, "\n", absl::StrJoin(signed_headers, ";"), "\n",
payload_hash);
return canonical;
}
std::string SigningString(std::string_view canonical_request,
const absl::Time& time, std::string_view scope) {
absl::TimeZone utc = absl::UTCTimeZone();
SHA256Digester sha256;
sha256.Write(canonical_request);
const auto digest = sha256.Digest();
auto digest_sv = std::string_view(reinterpret_cast<const char*>(&digest[0]),
digest.size());
return absl::StrFormat("AWS4-HMAC-SHA256\n%s\n%s\n%s",
absl::FormatTime("%Y%m%dT%H%M%SZ", time, utc), scope,
absl::BytesToHexString(digest_sv));
}
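// Derives the SigV4 signing key via chained HMAC-SHA256:
// HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), "s3"), "aws4_request").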
void GetSigningKey(std::string_view aws_secret_access_key,
std::string_view aws_region, const absl::Time& time,
unsigned char (&signing_key)[kHmacSize]) {
absl::TimeZone utc = absl::UTCTimeZone();
unsigned char date_key[kHmacSize];
unsigned char date_region_key[kHmacSize];
unsigned char date_region_service_key[kHmacSize];
ComputeHmac(absl::StrCat("AWS4", aws_secret_access_key),
absl::FormatTime("%Y%m%d", time, utc), date_key);
ComputeHmac(date_key, aws_region, date_region_key);
ComputeHmac(date_region_key, "s3", date_region_service_key);
ComputeHmac(date_region_service_key, "aws4_request", signing_key);
}
std::string AuthorizationHeader(
std::string_view access_key, std::string_view scope,
std::string_view signature_hex,
const std::vector<std::pair<std::string, std::string_view>>& headers) {
return absl::StrFormat(
"Authorization: AWS4-HMAC-SHA256 "
"Credential=%s/%s, "
"SignedHeaders=%s, "
"Signature=%s",
access_key, scope,
absl::StrJoin(headers, ";",
[](std::string* out, auto pair) {
absl::StrAppend(out, pair.first);
}),
signature_hex);
}
static constexpr char kAmzContentSha256Header[] = "x-amz-content-sha256: ";
static constexpr char kAmzSecurityTokenHeader[] = "x-amz-security-token: ";
static constexpr char kAmzRequesterPayerHeader[] =
"x-amz-requester-payer: requester";
}
S3RequestBuilder& S3RequestBuilder::MaybeAddRequesterPayer(
bool requester_payer) {
if (requester_payer) {
builder_.AddHeader(kAmzRequesterPayerHeader);
}
return *this;
}
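// Builds the final HttpRequest: adds the host, x-amz-content-sha256 and
// x-amz-date headers, sorts and appends the query parameters, and, unless the
// credentials are anonymous, computes and attaches the SigV4 Authorization
// header.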
HttpRequest S3RequestBuilder::BuildRequest(std::string_view host_header,
const AwsCredentials& credentials,
std::string_view aws_region,
std::string_view payload_sha256_hash,
const absl::Time& time) {
builder_.AddHostHeader(host_header);
builder_.AddHeader(
absl::StrCat(kAmzContentSha256Header, payload_sha256_hash));
builder_.AddHeader(absl::FormatTime("x-amz-date: %Y%m%dT%H%M%SZ", time,
absl::UTCTimeZone()));
std::stable_sort(std::begin(query_params_), std::end(query_params_));
for (const auto& [k, v] : query_params_) {
builder_.AddQueryParameter(k, v);
}
if (credentials.IsAnonymous()) {
return builder_.BuildRequest();
}
if (!credentials.session_token.empty()) {
builder_.AddHeader(
absl::StrCat(kAmzSecurityTokenHeader, credentials.session_token));
}
auto request = builder_.BuildRequest();
std::vector<std::pair<std::string, std::string_view>> signed_headers;
signed_headers.reserve(request.headers.size());
for (const auto& header_str : request.headers) {
std::string_view header = header_str;
auto pos = header.find(':');
assert(pos != std::string::npos);
auto key = absl::AsciiStrToLower(
absl::StripAsciiWhitespace(header.substr(0, pos)));
auto value = absl::StripAsciiWhitespace(header.substr(pos + 1));
signed_headers.push_back({std::move(key), std::move(value)});
}
std::stable_sort(std::begin(signed_headers), std::end(signed_headers));
auto parsed_uri = ParseGenericUri(request.url);
assert(!parsed_uri.path.empty());
std::string scope = absl::StrFormat(
"%s/%s/s3/aws4_request",
absl::FormatTime("%Y%m%d", time, absl::UTCTimeZone()), aws_region);
canonical_request_ =
CanonicalRequest(request.method, parsed_uri.path, parsed_uri.query,
payload_sha256_hash, signed_headers);
signing_string_ = SigningString(canonical_request_, time, scope);
unsigned char signing_key[kHmacSize];
GetSigningKey(credentials.secret_key, aws_region, time, signing_key);
unsigned char signature[kHmacSize];
ComputeHmac(signing_key, signing_string_, signature);
signature_ = absl::BytesToHexString(
std::string_view(reinterpret_cast<char*>(&signature[0]), kHmacSize));
std::string auth_header = AuthorizationHeader(credentials.access_key, scope,
signature_, signed_headers);
ABSL_LOG_IF(INFO, s3_logging.Level(1))
<< "Canonical Request\n"
<< canonical_request_
<< "\n\nSigning String\n"
<< signing_string_
<< "\n\nSigning Key\n"
<< absl::BytesToHexString(std::string_view(
reinterpret_cast<char*>(signing_key), kHmacSize))
<< "\n\nAuthorization Header\n"
<< auth_header;
request.headers.emplace_back(std::move(auth_header));
return request;
}
}
} | #include "tensorstore/kvstore/s3/s3_request_builder.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
using ::tensorstore::internal_kvstore_s3::AwsCredentials;
using ::tensorstore::internal_kvstore_s3::S3RequestBuilder;
namespace {
static const AwsCredentials credentials{
"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", ""};
static const absl::TimeZone utc = absl::UTCTimeZone();
static constexpr char aws_region[] = "us-east-1";
static constexpr char bucket[] = "examplebucket";
TEST(S3RequestBuilderTest, SignatureMethods) {
const auto now =
absl::FromCivil(absl::CivilSecond(2024, 2, 21, 03, 02, 05), utc);
auto builder =
S3RequestBuilder(
"PUT", "https:
.AddHeader("content-md5: 1B2M2Y8AsgTpgAmY7PhCfg==")
.AddHeader("content-type: text/plain");
auto request = builder.BuildRequest(
"bucket.s3.us-west-2.amazonaws.com", credentials, "us-west-2",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", now);
auto expected_canonical_request =
"PUT\n"
"/bucket/tensorstore/a-_.~%24%26%2C%3A%3D%40z/b/file.txt\n"
"\n"
"content-md5:1B2M2Y8AsgTpgAmY7PhCfg==\n"
"content-type:text/plain\n"
"host:bucket.s3.us-west-2.amazonaws.com\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20240221T030205Z\n"
"\n"
"content-md5;content-type;host;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20240221T030205Z\n"
"20240221/us-west-2/s3/aws4_request\n"
"28c393b04c83956e1d4056351030e34bffa3dd877cf6cf2d0c83d2114bef7940";
auto expected_signature =
"c3bf762eae82b8a87dc5f7af8c2ad8973d4a0132c49bd8c46d025d4a1aa175fb";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
}
TEST(S3RequestBuilderTest, AWS4SignatureGetExample) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url).AddHeader("range: bytes=0-9");
auto request = builder.BuildRequest(
"", credentials, aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"GET\n"
"/test.txt\n"
"\n"
"host:examplebucket.s3.amazonaws.com\n"
"range:bytes=0-9\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20130524T000000Z\n"
"\n"
"host;range;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972";
auto expected_signature =
"f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, "
"Signature="
"f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, url);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z", "range: bytes=0-9"));
}
TEST(S3RequestBuilderTest, AWS4SignaturePutExample) {
auto url = absl::StrFormat("s3:
auto builder = S3RequestBuilder("PUT", url)
.AddHeader("date: Fri, 24 May 2013 00:00:00 GMT")
.AddHeader("x-amz-storage-class: REDUCED_REDUNDANCY");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials, aws_region,
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"PUT\n"
"/test%24file.text\n"
"\n"
"date:Fri, 24 May 2013 00:00:00 GMT\n"
"host:examplebucket.s3.amazonaws.com\n"
"x-amz-content-sha256:"
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072\n"
"x-amz-date:20130524T000000Z\n"
"x-amz-storage-class:REDUCED_REDUNDANCY\n"
"\n"
"date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"9e0e90d9c76de8fa5b200d8c849cd5b8dc7a3be3951ddb7f6a76b4158342019d";
auto expected_signature =
"98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-"
"class, "
"Signature="
"98ad721746da40c64f1a55b78f14c238d841ea1380cd77a1b5971af0ece108bd";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, url);
EXPECT_EQ(request.headers.size(), 6);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "date: Fri, 24 May 2013 00:00:00 GMT",
"host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072",
"x-amz-date: 20130524T000000Z",
"x-amz-storage-class: REDUCED_REDUNDANCY"));
}
TEST(S3RequestBuilderTest, AWS4SignatureListObjectsExample) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url)
.AddQueryParameter("prefix", "J")
.AddQueryParameter("max-keys", "2");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials, aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
auto expected_canonical_request =
"GET\n"
"/\n"
"max-keys=2&prefix=J\n"
"host:examplebucket.s3.amazonaws.com\n"
"x-amz-content-sha256:"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
"x-amz-date:20130524T000000Z\n"
"\n"
"host;x-amz-content-sha256;x-amz-date\n"
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
auto expected_signing_string =
"AWS4-HMAC-SHA256\n"
"20130524T000000Z\n"
"20130524/us-east-1/s3/aws4_request\n"
"df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7";
auto expected_signature =
"34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7";
auto expected_auth_header =
"Authorization: AWS4-HMAC-SHA256 "
"Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, "
"SignedHeaders=host;x-amz-content-sha256;x-amz-date, "
"Signature="
"34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7";
EXPECT_EQ(builder.GetCanonicalRequest(), expected_canonical_request);
EXPECT_EQ(builder.GetSigningString(), expected_signing_string);
EXPECT_EQ(builder.GetSignature(), expected_signature);
EXPECT_EQ(request.url, absl::StrCat(url, "?max-keys=2&prefix=J"));
EXPECT_EQ(request.headers.size(), 4);
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
expected_auth_header, "host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z"));
}
TEST(S3RequestBuilderTest, AnonymousCredentials) {
auto url = absl::StrFormat("https:
auto builder = S3RequestBuilder("GET", url).AddQueryParameter("test", "this");
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), AwsCredentials{},
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_EQ(request.url, absl::StrCat(url, "?test=this"));
EXPECT_EQ(request.headers.size(), 3);
EXPECT_THAT(request.headers, ::testing::Not(::testing::Contains(
::testing::HasSubstr("Authorization:"))));
EXPECT_THAT(
request.headers,
::testing::UnorderedElementsAre(
"host: examplebucket.s3.amazonaws.com",
"x-amz-content-sha256: "
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"x-amz-date: 20130524T000000Z"));
}
TEST(S3RequestBuilderTest, AwsSessionTokenHeaderAdded) {
auto token = "abcdef1234567890";
auto sts_credentials =
AwsCredentials{credentials.access_key, credentials.secret_key, token};
auto builder =
S3RequestBuilder("GET", absl::StrFormat("https:
auto request = builder.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), sts_credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_EQ(request.headers.size(), 5);
EXPECT_THAT(request.headers,
::testing::Contains(::testing::HasSubstr("Authorization: ")));
EXPECT_THAT(request.headers, ::testing::Contains(absl::StrCat(
"x-amz-security-token: ", token)));
}
TEST(S3RequestBuilderTest, AwsRequesterPaysHeaderAdded) {
auto request =
S3RequestBuilder("GET", absl::StrFormat("https:
.MaybeAddRequesterPayer(false)
.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85"
"5",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_THAT(request.headers,
::testing::Not(::testing::Contains(
::testing::HasSubstr("x-amz-requester-payer"))));
request =
S3RequestBuilder("GET", absl::StrFormat("https:
.MaybeAddRequesterPayer(true)
.BuildRequest(
absl::StrFormat("%s.s3.amazonaws.com", bucket), credentials,
aws_region,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85"
"5",
absl::FromCivil(absl::CivilSecond(2013, 5, 24, 0, 0, 0), utc));
EXPECT_THAT(request.headers,
::testing::Contains("x-amz-requester-payer: requester"));
}
} |
615 | cpp | google/tensorstore | validate | tensorstore/kvstore/gcs/validate.cc | tensorstore/kvstore/gcs/validate_test.cc | #ifndef TENSORSTORE_KVSTORE_GCS_VALIDATE_H_
#define TENSORSTORE_KVSTORE_GCS_VALIDATE_H_
#include <string_view>
#include "absl/status/status.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/generation.h"
namespace tensorstore {
namespace internal_storage_gcs {
bool IsValidBucketName(std::string_view bucket);
bool IsValidObjectName(std::string_view name);
bool IsValidStorageGeneration(const StorageGeneration& gen);
inline bool IsRetriable(const absl::Status& status) {
return (status.code() == absl::StatusCode::kDeadlineExceeded ||
status.code() == absl::StatusCode::kResourceExhausted ||
status.code() == absl::StatusCode::kUnavailable);
}
absl::Status GcsHttpResponseToStatus(
const internal_http::HttpResponse& response, bool& retryable,
SourceLocation loc = ::tensorstore::SourceLocation::current());
}
}
#endif
#include "tensorstore/kvstore/gcs/validate.h"
#include <iterator>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/utf8.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_storage_gcs {
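// Checks GCS bucket naming rules: 3-222 characters starting and ending with a
// lowercase letter or digit, composed of dot-separated components of at most
// 63 characters that contain only lowercase letters, digits, '-' and '_' and
// do not begin or end with '-'.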
bool IsValidBucketName(std::string_view bucket) {
if (bucket.size() < 3 || bucket.size() > 222) return false;
if (!absl::ascii_isdigit(*bucket.begin()) &&
!absl::ascii_islower(*bucket.begin())) {
return false;
}
if (!absl::ascii_isdigit(*bucket.rbegin()) &&
!absl::ascii_islower(*bucket.rbegin())) {
return false;
}
for (std::string_view v : absl::StrSplit(bucket, absl::ByChar('.'))) {
if (v.empty()) return false;
if (v.size() > 63) return false;
if (*v.begin() == '-') return false;
if (*v.rbegin() == '-') return false;
for (const auto ch : v) {
if (ch != '-' && ch != '_' && !absl::ascii_isdigit(ch) &&
!absl::ascii_islower(ch)) {
return false;
}
}
}
return true;
}
bool IsValidObjectName(std::string_view name) {
if (name.empty() || name.size() > 1024) return false;
if (name == "." || name == "..") return false;
if (absl::StartsWith(name, ".well-known/acme-challenge")) return false;
for (const auto ch : name) {
if (ch == '\r' || ch == '\n') return false;
if (absl::ascii_iscntrl(ch)) return false;
}
return internal::IsValidUtf8(name);
}
bool IsValidStorageGeneration(const StorageGeneration& gen) {
return StorageGeneration::IsUnknown(gen) ||
StorageGeneration::IsNoValue(gen) ||
(StorageGeneration::IsUint64(gen) &&
StorageGeneration::ToUint64(gen) > 0);
}
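// Converts a non-OK GCS HTTP response into an absl::Status, taking the error
// message from the JSON "error.message" field when present and flagging
// 408/429/5xx responses as retryable.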
absl::Status GcsHttpResponseToStatus(
const internal_http::HttpResponse& response, bool& retryable,
SourceLocation loc) {
auto absl_status_code = HttpResponseCodeToStatusCode(response);
if (absl_status_code == absl::StatusCode::kOk) {
return absl::OkStatus();
}
retryable = (response.status_code == 429 ||
response.status_code == 408 ||
response.status_code >= 500
);
std::string error_message;
auto payload = response.payload;
auto payload_str = payload.Flatten();
if (auto j_obj = internal::ParseJson(payload_str); j_obj.is_object()) {
if (auto j_error = internal_json::JsonExtractMember(
j_obj.template get_ptr<::nlohmann::json::object_t*>(), "error");
j_error.is_object()) {
if (auto j_message = internal_json::JsonExtractMember(
j_error.template get_ptr<::nlohmann::json::object_t*>(),
"message");
j_message.is_string()) {
error_message = j_message.template get<std::string>();
}
}
}
if (error_message.empty()) {
error_message = HttpResponseCodeToMessage(response);
if (error_message.empty()) {
error_message = "Unknown";
}
}
absl::Status status(absl_status_code, error_message);
status.SetPayload("http_response_code",
absl::Cord(absl::StrFormat("%d", response.status_code)));
if (!payload_str.empty()) {
status.SetPayload(
"http_response_body",
payload.Subcord(0,
payload_str.size() < 256 ? payload_str.size() : 256));
}
if (auto id_header = response.headers.find("x-guploader-uploadid");
id_header != response.headers.end()) {
status.SetPayload("x-guploader-uploadid", absl::Cord(id_header->second));
}
MaybeAddSourceLocation(status, loc);
return status;
}
}
} | #include "tensorstore/kvstore/gcs/validate.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_storage_gcs::IsValidBucketName;
using ::tensorstore::internal_storage_gcs::IsValidObjectName;
TEST(ValidateTest, IsValidBucketName) {
EXPECT_TRUE(IsValidBucketName("foo"));
EXPECT_TRUE(IsValidBucketName("a.b"));
EXPECT_TRUE(IsValidBucketName("a-b"));
EXPECT_TRUE(IsValidBucketName("1.2.3.4"));
EXPECT_FALSE(IsValidBucketName("_abc"));
EXPECT_FALSE(IsValidBucketName("abc_"));
EXPECT_FALSE(
IsValidBucketName("1234567890b123456789012345678901234567890"
"1234567890b123456789012345678901234567890"
"abcd"));
EXPECT_TRUE(IsValidBucketName("a._b"));
EXPECT_TRUE(IsValidBucketName("a_.b"));
EXPECT_FALSE(IsValidBucketName("."));
EXPECT_FALSE(IsValidBucketName(".."));
EXPECT_FALSE(IsValidBucketName("aa"));
EXPECT_FALSE(IsValidBucketName("_foo"));
EXPECT_FALSE(IsValidBucketName("foo_"));
EXPECT_FALSE(IsValidBucketName("a..b"));
EXPECT_FALSE(IsValidBucketName("a.-b"));
EXPECT_FALSE(IsValidBucketName("a-.b"));
EXPECT_FALSE(
IsValidBucketName("1234567890b123456789012345678901234567890"
"1234567890b123456789012345678901234567890"
"abcd.b"));
}
TEST(ValidateTest, IsValidObjectName) {
EXPECT_TRUE(IsValidObjectName("foo"));
EXPECT_TRUE(IsValidObjectName("foo.bar"));
EXPECT_FALSE(IsValidObjectName(""));
EXPECT_FALSE(IsValidObjectName("."));
EXPECT_FALSE(IsValidObjectName(".."));
EXPECT_FALSE(IsValidObjectName(".well-known/acme-challenge"));
EXPECT_FALSE(IsValidObjectName("foo\rbar"));
EXPECT_FALSE(IsValidObjectName("foo\nbar"));
EXPECT_TRUE(IsValidObjectName("foo[*?#]"));
EXPECT_FALSE(IsValidObjectName("foo\004bar"));
EXPECT_FALSE(IsValidObjectName("foo\tbar"));
EXPECT_FALSE(IsValidObjectName("\xfe\xfe\xff\xff"));
EXPECT_FALSE(IsValidObjectName("\xfc\x80\x80\x80\x80\xaf"));
}
} |
616 | cpp | google/tensorstore | s3_metadata | tensorstore/kvstore/s3/s3_metadata.cc | tensorstore/kvstore/s3/s3_metadata_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_OBJECT_METADATA_H_
#define TENSORSTORE_KVSTORE_S3_OBJECT_METADATA_H_
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/result.h"
namespace tinyxml2 {
class XMLNode;
}
namespace tensorstore {
namespace internal_kvstore_s3 {
std::string GetNodeText(tinyxml2::XMLNode* node);
std::optional<int64_t> GetNodeInt(tinyxml2::XMLNode* node);
std::optional<absl::Time> GetNodeTimestamp(tinyxml2::XMLNode* node);
Result<StorageGeneration> StorageGenerationFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers);
absl::Status AwsHttpResponseToStatus(
const internal_http::HttpResponse& response, bool& retryable,
SourceLocation loc = ::tensorstore::SourceLocation::current());
}
}
#endif
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <initializer_list>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tinyxml2.h"
using ::tensorstore::internal_http::HttpResponse;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kEtag[] = "etag";
static constexpr char kLt[] = "&lt;";
static constexpr char kGt[] = "&gt;";
static constexpr char kQuot[] = "&quot;";
static constexpr char kApos[] = "&apos;";
static constexpr char kAmp[] = "&amp;";
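// Replaces the XML entities above with their literal characters, computing
// the output length first so the result string is allocated exactly once.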
std::string UnescapeXml(std::string_view data) {
  static LazyRE2 kSpecialXmlSymbols = {"(&gt;|&lt;|&quot;|&apos;|&amp;)"};
std::string_view search = data;
std::string_view symbol;
size_t result_len = data.length();
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
result_len -= symbol.length() - 1;
}
if (result_len == data.length()) {
return std::string(data);
}
search = data;
size_t pos = 0;
size_t res_pos = 0;
auto result = std::string(result_len, '0');
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
size_t next = data.length() - search.length();
for (size_t i = pos; i < next - symbol.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
if (symbol == kGt) {
result[res_pos++] = '>';
} else if (symbol == kLt) {
result[res_pos++] = '<';
} else if (symbol == kQuot) {
result[res_pos++] = '"';
} else if (symbol == kApos) {
      result[res_pos++] = '\'';
} else if (symbol == kAmp) {
result[res_pos++] = '&';
} else {
assert(false);
}
pos = next;
}
for (size_t i = pos; i < data.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
return result;
}
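// HTTP status codes generally treated as transient for AWS requests.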
bool IsRetryableAwsStatusCode(int32_t status_code) {
switch (status_code) {
case 408:
case 419:
case 429:
case 440:
case 500:
case 502:
case 503:
case 504:
case 509:
case 598:
case 599:
return true;
default:
return false;
}
}
bool IsRetryableAwsMessageCode(std::string_view code) {
static const absl::NoDestructor<absl::flat_hash_set<std::string_view>>
kRetryableMessages(absl::flat_hash_set<std::string_view>({
"InternalFailureException",
"InternalFailure",
"InternalServerError",
"InternalError",
"RequestExpiredException",
"RequestExpired",
"ServiceUnavailableException",
"ServiceUnavailableError",
"ServiceUnavailable",
"RequestThrottledException",
"RequestThrottled",
"ThrottlingException",
"ThrottledException",
"Throttling",
"SlowDownException",
"SlowDown",
"RequestTimeTooSkewedException",
"RequestTimeTooSkewed",
"RequestTimeoutException",
"RequestTimeout",
}));
return kRetryableMessages->contains(code);
}
}
std::optional<int64_t> GetNodeInt(tinyxml2::XMLNode* node) {
if (!node) {
return std::nullopt;
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
int64_t result;
if (absl::SimpleAtoi(printer.CStr(), &result)) {
return result;
}
return std::nullopt;
}
std::optional<absl::Time> GetNodeTimestamp(tinyxml2::XMLNode* node) {
if (!node) {
return std::nullopt;
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
absl::Time result;
if (absl::ParseTime(absl::RFC3339_full, printer.CStr(), absl::UTCTimeZone(),
&result, nullptr)) {
return result;
}
return std::nullopt;
}
std::string GetNodeText(tinyxml2::XMLNode* node) {
if (!node) {
return "";
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
return UnescapeXml(printer.CStr());
}
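// Derives a StorageGeneration from the "etag" response header, if present.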
Result<StorageGeneration> StorageGenerationFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers) {
if (auto it = headers.find(kEtag); it != headers.end()) {
return StorageGeneration::FromString(it->second);
}
return absl::NotFoundError("etag not found in response headers");
}
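// Converts a non-OK AWS HTTP response into an absl::Status, pulling the error
// code, request id and message from the x-amzn-* headers or the XML <Error>
// body, and recording whether the failure looks retryable.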
absl::Status AwsHttpResponseToStatus(const HttpResponse& response,
bool& retryable, SourceLocation loc) {
auto absl_status_code = internal_http::HttpResponseCodeToStatusCode(response);
if (absl_status_code == absl::StatusCode::kOk) {
return absl::OkStatus();
}
std::string error_type;
if (auto error_header = response.headers.find("x-amzn-errortype");
error_header != response.headers.end()) {
error_type = error_header->second;
}
absl::Cord request_id;
if (auto request_id_header = response.headers.find("x-amzn-requestid");
request_id_header != response.headers.end()) {
request_id = request_id_header->second;
}
std::string message;
auto payload = response.payload;
auto payload_str = payload.Flatten();
[&]() {
if (payload.empty()) return;
tinyxml2::XMLDocument xmlDocument;
if (int xmlcode = xmlDocument.Parse(payload_str.data(), payload_str.size());
xmlcode != tinyxml2::XML_SUCCESS) {
return;
}
auto* root_node = xmlDocument.FirstChildElement("Error");
if (root_node == nullptr) return;
if (error_type.empty()) {
error_type = GetNodeText(root_node->FirstChildElement("Code"));
}
if (request_id.empty()) {
request_id = GetNodeText(root_node->FirstChildElement("RequestId"));
}
message = GetNodeText(root_node->FirstChildElement("Message"));
}();
retryable = error_type.empty()
? IsRetryableAwsStatusCode(response.status_code)
: IsRetryableAwsMessageCode(error_type);
if (error_type.empty()) {
error_type = "Unknown";
}
absl::Status status(absl_status_code,
absl::StrFormat("%s%s%s", error_type,
message.empty() ? "" : ": ", message));
status.SetPayload("http_response_code",
absl::Cord(absl::StrFormat("%d", response.status_code)));
if (!payload_str.empty()) {
status.SetPayload(
"http_response_body",
payload.Subcord(0,
payload_str.size() < 256 ? payload_str.size() : 256));
}
if (!request_id.empty()) {
status.SetPayload("x-amzn-requestid", request_id);
}
MaybeAddSourceLocation(status, loc);
return status;
}
}
} | #include "tensorstore/kvstore/s3/s3_metadata.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/util/status_testutil.h"
#include "tinyxml2.h"
namespace {
using ::tensorstore::StatusIs;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::AwsHttpResponseToStatus;
using ::tensorstore::internal_kvstore_s3::GetNodeInt;
using ::tensorstore::internal_kvstore_s3::GetNodeText;
using ::tensorstore::internal_kvstore_s3::GetNodeTimestamp;
static constexpr char kListXml[] =
R"(<ListBucketResult xmlns="http:
R"(<Name>i-dont-exist</Name>)"
R"(<Prefix>tensorstore/test/</Prefix>)"
R"(<KeyCount>3</KeyCount>)"
R"(<MaxKeys>1000</MaxKeys>)"
R"(<IsTruncated>false</IsTruncated>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/abc</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"900150983cd24fb0d6963f7d28e17f72"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>3</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/ab>cd</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"e2fc714c4727ee9395f324cd2e7f331f"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>4</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/abcde</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"ab56b4d92b40713acc5af89985d4b786"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>5</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(</ListBucketResult>)";
TEST(XmlSearchTest, GetNodeValues) {
tinyxml2::XMLDocument xmlDocument;
ASSERT_EQ(xmlDocument.Parse(kListXml), tinyxml2::XML_SUCCESS);
auto* root = xmlDocument.FirstChildElement("ListBucketResult");
ASSERT_NE(root, nullptr);
EXPECT_EQ("i-dont-exist", GetNodeText(root->FirstChildElement("Name")));
auto* contents = root->FirstChildElement("Contents");
ASSERT_NE(contents, nullptr);
EXPECT_EQ(R"("900150983cd24fb0d6963f7d28e17f72")",
GetNodeText(contents->FirstChildElement("ETag")));
EXPECT_THAT(GetNodeInt(contents->FirstChildElement("Size")),
::testing::Optional(::testing::Eq(3)));
EXPECT_THAT(
GetNodeTimestamp(contents->FirstChildElement("LastModified")),
::testing::Optional(::testing::Eq(absl::FromUnixSeconds(1688830015))));
}
TEST(S3MetadataTest, AwsHttpResponseToStatus) {
HttpResponse response;
{
response.status_code = 404;
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_FALSE(retryable);
}
{
response.status_code = 429;
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kUnavailable));
EXPECT_TRUE(retryable);
}
{
response.status_code = 400;
response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>UnknownError</Code>
<Message>Unknown message</Message>
<Resource>/mybucket/myfoto.jpg</Resource>
<RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_FALSE(retryable);
}
{
response.status_code = 400;
response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ThrottledException</Code>
<Message>Throttled message</Message>
<Resource>/mybucket/myfoto.jpg</Resource>
<RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_TRUE(retryable);
}
{
response.status_code = 400;
response.headers.emplace("x-amzn-errortype", "UnknownError");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_FALSE(retryable);
}
{
response.status_code = 400;
response.headers.clear();
response.headers.emplace("x-amzn-errortype", "ThrottledException");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_TRUE(retryable);
}
}
} |
617 | cpp | google/tensorstore | ec2_credential_provider | tensorstore/kvstore/s3/credentials/ec2_credential_provider.cc | tensorstore/kvstore/s3/credentials/ec2_credential_provider_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_CREDENTIALS_EC2_CREDENTIAL_PROVIDER_H_
#define TENSORSTORE_KVSTORE_S3_CREDENTIALS_EC2_CREDENTIAL_PROVIDER_H_
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
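// Retrieves AWS credentials from the EC2 instance metadata service (IMDSv2).
// Illustrative usage only; `transport` is assumed to be an HttpTransport:
//
//   EC2MetadataCredentialProvider provider(/*endpoint=*/"", transport);
//   auto credentials = provider.GetCredentials();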
class EC2MetadataCredentialProvider : public AwsCredentialProvider {
public:
EC2MetadataCredentialProvider(
std::string_view endpoint,
std::shared_ptr<internal_http::HttpTransport> transport)
: endpoint_(endpoint), transport_(std::move(transport)) {}
Result<AwsCredentials> GetCredentials() override;
inline const std::string& GetEndpoint() const { return endpoint_; }
private:
std::string endpoint_;
std::shared_ptr<internal_http::HttpTransport> transport_;
};
}
}
#endif
#include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
ABSL_FLAG(std::optional<std::string>,
tensorstore_aws_ec2_metadata_service_endpoint, std::nullopt,
"Endpoint to used for http access AWS metadata service. "
"Overrides AWS_EC2_METADATA_SERVICE_ENDPOINT.");
using ::tensorstore::Result;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_http::HttpRequestBuilder;
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kMetadataTokenHeader[] = "x-aws-ec2-metadata-token:";
static constexpr char kIamCredentialsPath[] =
"/latest/meta-data/iam/security-credentials/";
static constexpr absl::Duration kConnectTimeout = absl::Milliseconds(200);
static constexpr absl::Duration kDefaultTimeout = absl::Minutes(5);
static constexpr char kSuccess[] = "Success";
std::string GetEC2MetadataServiceEndpoint() {
return GetFlagOrEnvValue(FLAGS_tensorstore_aws_ec2_metadata_service_endpoint,
"AWS_EC2_METADATA_SERVICE_ENDPOINT")
.value_or("http:
}
struct EC2CredentialsResponse {
std::string code;
std::optional<absl::Time> last_updated;
std::optional<std::string> type;
std::optional<std::string> access_key_id;
std::optional<std::string> secret_access_key;
std::optional<std::string> token;
std::optional<absl::Time> expiration;
};
inline constexpr auto EC2CredentialsResponseBinder = jb::Object(
jb::Member("Code", jb::Projection(&EC2CredentialsResponse::code)),
jb::OptionalMember("LastUpdated",
jb::Projection(&EC2CredentialsResponse::last_updated)),
jb::OptionalMember("Type", jb::Projection(&EC2CredentialsResponse::type)),
jb::OptionalMember("AccessKeyId",
jb::Projection(&EC2CredentialsResponse::access_key_id)),
jb::OptionalMember(
"SecretAccessKey",
jb::Projection(&EC2CredentialsResponse::secret_access_key)),
jb::OptionalMember("Token", jb::Projection(&EC2CredentialsResponse::token)),
jb::OptionalMember("Expiration",
jb::Projection(&EC2CredentialsResponse::expiration)));
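// Obtains an IMDSv2 session token by POSTing to /latest/api/token with a
// 21600-second TTL header; the token is attached to subsequent metadata
// requests.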
Result<absl::Cord> GetEC2ApiToken(std::string_view endpoint,
internal_http::HttpTransport& transport) {
auto token_request =
HttpRequestBuilder("POST",
tensorstore::StrCat(endpoint, "/latest/api/token"))
.AddHeader("x-aws-ec2-metadata-token-ttl-seconds: 21600")
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto token_response,
transport
.IssueRequest(token_request,
internal_http::IssueRequestOptions()
.SetRequestTimeout(absl::InfiniteDuration())
.SetConnectTimeout(kConnectTimeout))
.result());
bool is_retryable = false;
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(token_response, is_retryable));
return std::move(token_response.payload);
}
}
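// Fetches credentials from the instance metadata service: obtain an IMDSv2
// token, list the IAM roles under the security-credentials path, then fetch
// and parse the credentials of the first role. The reported expiry is moved
// 60 seconds earlier as a refresh margin.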
Result<AwsCredentials> EC2MetadataCredentialProvider::GetCredentials() {
if (endpoint_.empty()) {
endpoint_ = GetEC2MetadataServiceEndpoint();
}
TENSORSTORE_ASSIGN_OR_RETURN(auto api_token,
GetEC2ApiToken(endpoint_, *transport_));
auto token_header = tensorstore::StrCat(kMetadataTokenHeader, api_token);
auto iam_role_request =
HttpRequestBuilder("GET",
tensorstore::StrCat(endpoint_, kIamCredentialsPath))
.AddHeader(token_header)
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_role_response,
transport_->IssueRequest(iam_role_request, {}).result());
auto iam_role_plain_text = iam_role_response.payload.Flatten();
bool is_retryable = false;
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(iam_role_response, is_retryable));
std::vector<std::string_view> iam_roles =
absl::StrSplit(iam_role_plain_text, '\n', absl::SkipWhitespace());
if (iam_roles.empty()) {
return absl::NotFoundError("Empty EC2 Role list");
}
auto iam_credentials_request_url =
tensorstore::StrCat(endpoint_, kIamCredentialsPath, iam_roles[0]);
auto iam_credentials_request =
HttpRequestBuilder("GET", iam_credentials_request_url)
.AddHeader(token_header)
.BuildRequest();
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_credentials_response,
transport_->IssueRequest(iam_credentials_request, {}).result());
auto iam_credentials_plain_text = iam_credentials_response.payload.Flatten();
TENSORSTORE_RETURN_IF_ERROR(
AwsHttpResponseToStatus(iam_credentials_response, is_retryable));
auto json_credentials = ParseJson(iam_credentials_plain_text);
TENSORSTORE_ASSIGN_OR_RETURN(
auto iam_credentials,
jb::FromJson<EC2CredentialsResponse>(json_credentials,
EC2CredentialsResponseBinder));
if (iam_credentials.code != kSuccess) {
return absl::NotFoundError(
absl::StrCat("EC2Metadata request to [", iam_credentials_request_url,
"] failed with code ", iam_credentials.code));
}
auto default_timeout = absl::Now() + kDefaultTimeout;
auto expires_at =
iam_credentials.expiration.value_or(default_timeout) - absl::Seconds(60);
return AwsCredentials{iam_credentials.access_key_id.value_or(""),
iam_credentials.secret_access_key.value_or(""),
iam_credentials.token.value_or(""), expires_at};
}
}
} | #include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using ::tensorstore::internal_kvstore_s3::EC2MetadataCredentialProvider;
static constexpr char kDefaultEndpoint[] = "http://169.254.169.254";  // Default EC2 instance metadata service (IMDS) address.
static constexpr char kCustomEndpoint[] = "http://custom.endpoint";  // Arbitrary endpoint used only as a lookup key by the mock transport.
static constexpr char kApiToken[] = "1234567890";
static constexpr char kAccessKey[] = "ASIA1234567890";
static constexpr char kSecretKey[] = "1234567890abcdef";
static constexpr char kSessionToken[] = "abcdef123456790";
class EC2MetadataCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override { UnsetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); }
};
TEST_F(EC2MetadataCredentialProviderTest, CredentialRetrievalFlow) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kDefaultEndpoint, kApiToken, kAccessKey,
kSecretKey, kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, EnvironmentVariableMetadataServer) {
SetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT", kCustomEndpoint);
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, InjectedMetadataServer) {
auto expiry = absl::Now() + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kCustomEndpoint, kApiToken, kAccessKey, kSecretKey,
kSessionToken, expiry));
auto provider = std::make_shared<EC2MetadataCredentialProvider>(
kCustomEndpoint, mock_transport);
TENSORSTORE_CHECK_OK_AND_ASSIGN(auto credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kCustomEndpoint);
ASSERT_EQ(credentials.access_key, kAccessKey);
ASSERT_EQ(credentials.secret_key, kSecretKey);
ASSERT_EQ(credentials.session_token, kSessionToken);
ASSERT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
}
TEST_F(EC2MetadataCredentialProviderTest, NoIamRolesInSecurityCredentials) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{""}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
ASSERT_FALSE(provider->GetCredentials());
ASSERT_EQ(provider->GetEndpoint(), kDefaultEndpoint);
EXPECT_THAT(provider->GetCredentials().status().ToString(),
::testing::HasSubstr("Empty EC2 Role list"));
}
TEST_F(EC2MetadataCredentialProviderTest, UnsuccessfulJsonResponse) {
auto url_to_response = absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{200, absl::Cord{kApiToken}}},
{"GET http:
HttpResponse{
200, absl::Cord{"info"}, {{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET http:
HttpResponse{200,
absl::Cord{"mock-iam-role"},
{{"x-aws-ec2-metadata-token", kApiToken}}}},
{"GET "
"http:
"mock-iam-role",
HttpResponse{200,
absl::Cord(R"({"Code": "EntirelyUnsuccessful"})"),
{{"x-aws-ec2-metadata-token", kApiToken}}}}};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(std::move(url_to_response));
auto provider =
std::make_shared<EC2MetadataCredentialProvider>("", mock_transport);
auto credentials = provider->GetCredentials();
EXPECT_THAT(credentials.status(), MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(credentials.status().ToString(),
::testing::AllOf(::testing::HasSubstr("EC2Metadata request"),
::testing::HasSubstr("EntirelyUnsuccessful")));
}
} |
618 | cpp | google/tensorstore | default_credential_provider | tensorstore/kvstore/s3/credentials/default_credential_provider.cc | tensorstore/kvstore/s3/credentials/default_credential_provider_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_CREDENTIALS_DEFAULT_CREDENTIAL_PROVIDER_H_
#define TENSORSTORE_KVSTORE_S3_CREDENTIALS_DEFAULT_CREDENTIAL_PROVIDER_H_
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include "absl/base/thread_annotations.h"
#include "absl/functional/function_ref.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
class DefaultAwsCredentialsProvider : public AwsCredentialProvider {
public:
struct Options {
std::string filename;
std::string profile;
std::string endpoint;
std::shared_ptr<internal_http::HttpTransport> transport;
};
DefaultAwsCredentialsProvider(
Options options = {{}, {}, {}, internal_http::GetDefaultHttpTransport()},
absl::FunctionRef<absl::Time()> clock = absl::Now);
Result<AwsCredentials> GetCredentials() override;
private:
Options options_;
absl::FunctionRef<absl::Time()> clock_;
absl::Mutex mutex_;
std::unique_ptr<AwsCredentialProvider> provider_ ABSL_GUARDED_BY(mutex_);
AwsCredentials credentials_ ABSL_GUARDED_BY(mutex_);
};
using AwsCredentialProviderFn =
std::function<Result<std::unique_ptr<AwsCredentialProvider>>()>;
void RegisterAwsCredentialProviderProvider(AwsCredentialProviderFn provider,
int priority);
Result<std::unique_ptr<AwsCredentialProvider>> GetAwsCredentialProvider(
std::string_view filename, std::string_view profile,
std::string_view metadata_endpoint,
std::shared_ptr<internal_http::HttpTransport> transport);
}
}
#endif
#include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include <algorithm>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/functional/function_ref.h"
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/credentials/ec2_credential_provider.h"
#include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
struct AwsCredentialProviderRegistry {
std::vector<std::pair<int, AwsCredentialProviderFn>> providers;
absl::Mutex mutex;
};
AwsCredentialProviderRegistry& GetAwsProviderRegistry() {
static absl::NoDestructor<AwsCredentialProviderRegistry> registry;
return *registry;
}
}
void RegisterAwsCredentialProviderProvider(AwsCredentialProviderFn provider,
int priority) {
auto& registry = GetAwsProviderRegistry();
absl::WriterMutexLock lock(&registry.mutex);
registry.providers.emplace_back(priority, std::move(provider));
std::sort(registry.providers.begin(), registry.providers.end(),
[](const auto& a, const auto& b) { return a.first < b.first; });
}
Result<std::unique_ptr<AwsCredentialProvider>> GetAwsCredentialProvider(
std::string_view filename, std::string_view profile,
std::string_view metadata_endpoint,
std::shared_ptr<internal_http::HttpTransport> transport) {
auto& registry = GetAwsProviderRegistry();
absl::WriterMutexLock lock(&registry.mutex);
for (const auto& provider : registry.providers) {
auto credentials = provider.second();
if (credentials.ok()) return credentials;
}
return std::make_unique<DefaultAwsCredentialsProvider>(
DefaultAwsCredentialsProvider::Options{
std::string{filename}, std::string{profile},
std::string{metadata_endpoint}, transport});
}
DefaultAwsCredentialsProvider::DefaultAwsCredentialsProvider(
Options options, absl::FunctionRef<absl::Time()> clock)
: options_(std::move(options)),
clock_(clock),
credentials_{{}, {}, {}, absl::InfinitePast()} {}
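// Resolution order: return cached credentials while still valid, retry the
// provider that last succeeded, then, depending on the configured options,
// try environment variables, the shared credentials file, and the EC2
// metadata server; fall back to anonymous access if everything fails.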
Result<AwsCredentials> DefaultAwsCredentialsProvider::GetCredentials() {
{
absl::ReaderMutexLock lock(&mutex_);
if (credentials_.expires_at > clock_()) {
return credentials_;
}
}
absl::WriterMutexLock lock(&mutex_);
if (provider_) {
auto credentials_result = provider_->GetCredentials();
if (credentials_result.ok()) {
credentials_ = credentials_result.value();
return credentials_;
}
}
bool only_default_options = options_.filename.empty() &&
options_.profile.empty() &&
options_.endpoint.empty();
if (only_default_options) {
provider_ = std::make_unique<EnvironmentCredentialProvider>();
if (auto credentials_result = provider_->GetCredentials();
credentials_result.ok()) {
credentials_ = std::move(credentials_result).value();
return credentials_;
} else if (s3_logging) {
ABSL_LOG_FIRST_N(INFO, 1)
<< "Could not acquire credentials from environment: "
<< credentials_result.status();
}
}
if (only_default_options || !options_.filename.empty() ||
!options_.profile.empty()) {
provider_ = std::make_unique<FileCredentialProvider>(options_.filename,
options_.profile);
if (auto credentials_result = provider_->GetCredentials();
credentials_result.ok()) {
credentials_ = std::move(credentials_result).value();
return credentials_;
} else if (s3_logging) {
ABSL_LOG_FIRST_N(INFO, 1)
<< "Could not acquire credentials from file/profile: "
<< credentials_result.status();
}
}
if (only_default_options || !options_.endpoint.empty()) {
provider_ = std::make_unique<EC2MetadataCredentialProvider>(
options_.endpoint, options_.transport);
if (auto credentials_result = provider_->GetCredentials();
credentials_result.ok()) {
credentials_ = std::move(credentials_result).value();
return credentials_;
} else if (s3_logging) {
ABSL_LOG(INFO)
<< "Could not acquire credentials from EC2 Metadata Server "
<< options_.endpoint << ": " << credentials_result.status();
}
}
provider_ = nullptr;
credentials_ = AwsCredentials::Anonymous();
return credentials_;
}
}
} | #include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/s3/credentials/test_utils.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::DefaultAwsCredentialsProvider;
using ::tensorstore::internal_kvstore_s3::DefaultEC2MetadataFlow;
using Options =
::tensorstore::internal_kvstore_s3::DefaultAwsCredentialsProvider::Options;
static constexpr char kEndpoint[] = "http://endpoint";  // Arbitrary endpoint used only as a lookup key by the mock transport.
class CredentialFileFactory
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteCredentialsFile() {
auto p = JoinPath(path(), "aws_config");
std::ofstream ofs(p);
ofs << "[alice]\n"
"aws_access_key_id = AKIAIOSFODNN6EXAMPLE\n"
"aws_secret_access_key = "
"wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY\n"
"aws_session_token = abcdef1234567890\n"
"\n";
ofs.close();
return p;
}
};
class DefaultCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override {
UnsetEnv("AWS_ACCESS_KEY_ID");
UnsetEnv("AWS_SECRET_ACCESS_KEY");
UnsetEnv("AWS_SESSION_TOKEN");
}
};
TEST_F(DefaultCredentialProviderTest, AnonymousCredentials) {
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
absl::flat_hash_map<std::string, HttpResponse>());
auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
Options{{}, {}, {}, mock_transport});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
EXPECT_TRUE(credentials.IsAnonymous());
EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials2,
provider->GetCredentials());
EXPECT_TRUE(credentials2.IsAnonymous());
EXPECT_EQ(credentials2.expires_at, absl::InfiniteFuture());
}
TEST_F(DefaultCredentialProviderTest, EnvironmentCredentialIdempotency) {
SetEnv("AWS_ACCESS_KEY_ID", "access");
SetEnv("AWS_SECRET_ACCESS_KEY", "secret");
SetEnv("AWS_SESSION_TOKEN", "token");
auto provider = std::make_unique<DefaultAwsCredentialsProvider>();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "access");
EXPECT_EQ(credentials.secret_key, "secret");
EXPECT_EQ(credentials.session_token, "token");
EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials2,
provider->GetCredentials());
EXPECT_EQ(credentials.access_key, credentials2.access_key);
EXPECT_EQ(credentials.secret_key, credentials2.secret_key);
EXPECT_EQ(credentials.session_token, credentials2.session_token);
EXPECT_EQ(credentials.expires_at, credentials2.expires_at);
}
TEST_F(DefaultCredentialProviderTest, ConfigureFileProviderFromOptions) {
auto factory = CredentialFileFactory{};
auto credentials_file = factory.WriteCredentialsFile();
auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
Options{credentials_file, "alice"});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
EXPECT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
EXPECT_EQ(credentials.session_token, "abcdef1234567890");
EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials2,
provider->GetCredentials());
EXPECT_EQ(credentials.access_key, credentials2.access_key);
EXPECT_EQ(credentials.secret_key, credentials2.secret_key);
EXPECT_EQ(credentials.session_token, credentials2.session_token);
EXPECT_EQ(credentials.expires_at, credentials2.expires_at);
}
TEST_F(DefaultCredentialProviderTest, ConfigureEC2ProviderFromOptions) {
auto now = absl::Now();
auto stuck_clock = [&]() -> absl::Time { return now; };
auto expiry = now + absl::Seconds(200);
auto mock_transport = std::make_shared<DefaultMockHttpTransport>(
DefaultEC2MetadataFlow(kEndpoint, "1234", "ASIA1234567890",
"1234567890abcdef", "token", expiry));
auto provider = std::make_unique<DefaultAwsCredentialsProvider>(
Options{{}, {}, kEndpoint, mock_transport}, stuck_clock);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "ASIA1234567890");
EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
EXPECT_EQ(credentials.session_token, "token");
EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
mock_transport->Reset(absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{404, absl::Cord{""}}},
});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "ASIA1234567890");
EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
EXPECT_EQ(credentials.session_token, "token");
EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
now += absl::Seconds(300);
mock_transport->Reset(
DefaultEC2MetadataFlow(kEndpoint, "1234", "ASIA1234567890",
"1234567890abcdef", "TOKEN", expiry));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "ASIA1234567890");
EXPECT_EQ(credentials.secret_key, "1234567890abcdef");
EXPECT_EQ(credentials.session_token, "TOKEN");
EXPECT_EQ(credentials.expires_at, expiry - absl::Seconds(60));
mock_transport->Reset(absl::flat_hash_map<std::string, HttpResponse>{
{"POST http:
HttpResponse{404, absl::Cord{""}}},
});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
EXPECT_EQ(credentials.access_key, "");
EXPECT_EQ(credentials.secret_key, "");
EXPECT_EQ(credentials.session_token, "");
EXPECT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
} |
619 | cpp | google/tensorstore | file_credential_provider | tensorstore/kvstore/s3/credentials/file_credential_provider.cc | tensorstore/kvstore/s3/credentials/file_credential_provider_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_CREDENTIALS_FILE_CREDENTIAL_PROVIDER_H_
#define TENSORSTORE_KVSTORE_S3_CREDENTIALS_FILE_CREDENTIAL_PROVIDER_H_
#include <string>
#include <string_view>
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
class FileCredentialProvider : public AwsCredentialProvider {
private:
std::string filename_;
std::string profile_;
public:
FileCredentialProvider(std::string_view filename, std::string_view profile);
Result<AwsCredentials> GetCredentials() override;
inline const std::string& GetFileName() const { return filename_; }
inline const std::string& GetProfile() const { return profile_; }
};
}
}
#endif
#include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/lines/line_reading.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
static constexpr char kEnvAwsCredentialsFile[] = "AWS_SHARED_CREDENTIALS_FILE";
static constexpr char kDefaultAwsCredentialsFilePath[] = ".aws/credentials";
static constexpr char kCfgAwsAccessKeyId[] = "aws_access_key_id";
static constexpr char kCfgAwsSecretAccessKeyId[] = "aws_secret_access_key";
static constexpr char kCfgAwsSessionToken[] = "aws_session_token";
static constexpr char kEnvAwsProfile[] = "AWS_PROFILE";
static constexpr char kDefaultProfile[] = "default";
std::optional<std::string> GetAwsCredentialsFileName() {
if (auto credentials_file = GetEnv(kEnvAwsCredentialsFile);
credentials_file) {
return credentials_file;
}
if (auto home_dir = GetEnv("HOME"); home_dir) {
return JoinPath(*home_dir, kDefaultAwsCredentialsFilePath);
}
return std::nullopt;
}
}
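// Falls back to $AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials when no
// filename is given, and to $AWS_PROFILE or "default" when no profile is
// given.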
FileCredentialProvider::FileCredentialProvider(std::string_view filename,
std::string_view profile)
: filename_(filename), profile_(profile) {
if (filename_.empty()) {
if (auto credentials_file = GetAwsCredentialsFileName(); credentials_file) {
filename_ = std::move(*credentials_file);
}
}
if (profile_.empty()) {
profile_ = GetEnv(kEnvAwsProfile).value_or(kDefaultProfile);
}
}
Result<AwsCredentials> FileCredentialProvider::GetCredentials() {
if (filename_.empty()) {
return absl::NotFoundError("No credentials file specified");
}
riegeli::FdReader reader(filename_);
if (!reader.ok()) {
return absl::NotFoundError(
absl::StrFormat("Could not open credentials file [%s]", filename_));
}
AwsCredentials credentials{};
std::string_view line;
bool profile_found = false;
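  // Parse the INI-style credentials file: "[section]" headers select the
  // profile, and "key = value" lines within the matching section populate
  // the credentials.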
while (riegeli::ReadLine(reader, line)) {
auto sline = absl::StripAsciiWhitespace(line);
if (sline.empty() || sline[0] == '#') continue;
if (sline[0] == '[' && sline[sline.size() - 1] == ']') {
if (profile_found) break;
auto section_name =
absl::StripAsciiWhitespace(sline.substr(1, sline.size() - 2));
ABSL_LOG_IF(INFO, s3_logging) << "Found section name [" << section_name
<< "] in file [" << filename_ << "]";
profile_found = (section_name == profile_);
continue;
}
if (profile_found) {
std::pair<std::string_view, std::string_view> kv =
absl::StrSplit(sline, absl::MaxSplits('=', 1));
kv.first = absl::StripAsciiWhitespace(kv.first);
kv.second = absl::StripAsciiWhitespace(kv.second);
if (kv.first == kCfgAwsAccessKeyId) {
credentials.access_key = kv.second;
} else if (kv.first == kCfgAwsSecretAccessKeyId) {
credentials.secret_key = kv.second;
} else if (kv.first == kCfgAwsSessionToken) {
credentials.session_token = kv.second;
}
}
}
if (!profile_found) {
return absl::NotFoundError(
absl::StrFormat("Profile [%s] not found in credentials file [%s]",
profile_, filename_));
}
ABSL_LOG_FIRST_N(INFO, 1)
<< "Using profile [" << profile_ << "] in file [" << filename_ << "]";
credentials.expires_at = absl::InfiniteFuture();
return credentials;
}
}
} | #include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_kvstore_s3::FileCredentialProvider;
class TestData
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteCredentialsFile() {
auto p = JoinPath(path(), "aws_config");
std::ofstream ofs(p);
ofs << "discarded_value = 500\n"
"\n"
"[default]\n"
"aws_access_key_id =AKIAIOSFODNN7EXAMPLE\n"
"aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n"
"aws_session_token= abcdef1234567890 \n"
"\n"
"[alice]\n"
"aws_access_key_id = AKIAIOSFODNN6EXAMPLE\n"
"aws_secret_access_key = "
"wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY\n"
"\n";
ofs.close();
return p;
}
};
class FileCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override {
UnsetEnv("AWS_SHARED_CREDENTIALS_FILE");
UnsetEnv("AWS_PROFILE");
}
};
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileDefault) {
TestData test_data;
std::string credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
auto provider = FileCredentialProvider("", "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "default");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "abcdef1234567890");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest,
ProviderAwsCredentialsFromFileProfileOverride) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
auto provider = FileCredentialProvider("", "alice");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileProfileEnv) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
SetEnv("AWS_PROFILE", "alice");
auto provider = FileCredentialProvider("", "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest,
ProviderAwsCredentialsFromFileInvalidProfileEnv) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
SetEnv("AWS_PROFILE", "bob");
auto provider = FileCredentialProvider("", "");
ASSERT_FALSE(provider.GetCredentials().ok());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "bob");
}
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileOverride) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
auto provider =
std::make_unique<FileCredentialProvider>(credentials_filename, "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
ASSERT_EQ(provider->GetFileName(), credentials_filename);
ASSERT_EQ(provider->GetProfile(), "default");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "abcdef1234567890");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
provider =
std::make_unique<FileCredentialProvider>(credentials_filename, "alice");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetFileName(), credentials_filename);
ASSERT_EQ(provider->GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
} |
620 | cpp | google/tensorstore | environment_credential_provider | tensorstore/kvstore/s3/credentials/environment_credential_provider.cc | tensorstore/kvstore/s3/credentials/environment_credential_provider_test.cc | #ifndef TENSORSTORE_KVSTORE_S3_CREDENTIALS_ENVIRONMENT_CREDENTIAL_PROVIDER_H_
#define TENSORSTORE_KVSTORE_S3_CREDENTIALS_ENVIRONMENT_CREDENTIAL_PROVIDER_H_
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_s3 {
class EnvironmentCredentialProvider : public AwsCredentialProvider {
public:
Result<AwsCredentials> GetCredentials() override;
};
}
}
#endif
#include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
using ::tensorstore::internal::GetEnv;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kEnvAwsAccessKeyId[] = "AWS_ACCESS_KEY_ID";
static constexpr char kEnvAwsSecretAccessKey[] = "AWS_SECRET_ACCESS_KEY";
static constexpr char kEnvAwsSessionToken[] = "AWS_SESSION_TOKEN";
}
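// Reads credentials from AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, plus
// an optional AWS_SESSION_TOKEN; the resulting credentials never expire.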
Result<AwsCredentials> EnvironmentCredentialProvider::GetCredentials() {
auto access_key = GetEnv(kEnvAwsAccessKeyId);
if (!access_key) {
return absl::NotFoundError(absl::StrCat(kEnvAwsAccessKeyId, " not set"));
}
auto secret_key = GetEnv(kEnvAwsSecretAccessKey);
if (!secret_key) {
return absl::NotFoundError(
absl::StrCat(kEnvAwsSecretAccessKey, " not set"));
}
ABSL_LOG_FIRST_N(INFO, 1)
<< "Using Environment Variable " << kEnvAwsAccessKeyId;
auto credentials = AwsCredentials{*access_key, *secret_key};
if (auto session_token = GetEnv(kEnvAwsSessionToken); session_token) {
credentials.session_token = *session_token;
}
credentials.expires_at = absl::InfiniteFuture();
return credentials;
}
}
} | #include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h"
#include <gtest/gtest.h>
#include "tensorstore/internal/env.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_kvstore_s3::EnvironmentCredentialProvider;
class EnvironmentCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override {
for (const char* var :
{"AWS_SHARED_CREDENTIALS_FILE", "AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "AWS_PROFILE"}) {
UnsetEnv(var);
}
}
};
#ifndef _WIN32
TEST_F(EnvironmentCredentialProviderTest, ProviderNoCredentials) {
auto provider = EnvironmentCredentialProvider();
ASSERT_FALSE(provider.GetCredentials().ok());
SetEnv("AWS_ACCESS_KEY_ID", "foo");
SetEnv("AWS_SECRET_ACCESS_KEY", "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(credentials.access_key, "foo");
ASSERT_TRUE(credentials.secret_key.empty());
ASSERT_TRUE(credentials.session_token.empty());
}
#endif
TEST_F(EnvironmentCredentialProviderTest, ProviderAwsCredentialsFromEnv) {
SetEnv("AWS_ACCESS_KEY_ID", "foo");
SetEnv("AWS_SECRET_ACCESS_KEY", "bar");
SetEnv("AWS_SESSION_TOKEN", "qux");
auto provider = EnvironmentCredentialProvider();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(credentials.access_key, "foo");
ASSERT_EQ(credentials.secret_key, "bar");
ASSERT_EQ(credentials.session_token, "qux");
}
} |
621 | cpp | google/tensorstore | gcs_testbench | tensorstore/kvstore/gcs/gcs_testbench.cc | tensorstore/kvstore/gcs_http/gcs_testbench_test.cc | #ifndef TENSORSTORE_KVSTORE_GCS_GCS_TESTBENCH_H_
#define TENSORSTORE_KVSTORE_GCS_GCS_TESTBENCH_H_
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/internal/os/subprocess.h"
namespace gcs_testbench {
class StorageTestbench {
public:
StorageTestbench();
~StorageTestbench();
void SpawnProcess();
static absl::Status CreateBucket(std::string grpc_endpoint,
std::string bucket);
std::string http_address();
std::string grpc_address();
int http_port;
int grpc_port;
bool running = false;
std::optional<tensorstore::internal::Subprocess> child;
};
}
#endif
#include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/status.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/os/subprocess.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "google/storage/v2/storage.grpc.pb.h"
#include "google/storage/v2/storage.pb.h"
ABSL_FLAG(std::string, testbench_binary, "",
"Path to the gcs storage-testbench rest_server");
namespace gcs_testbench {
using ::google::storage::v2::Storage;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::Subprocess;
using ::tensorstore::internal::SubprocessOptions;
using ::tensorstore::internal_http::GetDefaultHttpTransport;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::TryPickUnusedPort;
StorageTestbench::StorageTestbench() = default;
std::string StorageTestbench::http_address() {
return absl::StrFormat("localhost:%d", http_port);
}
std::string StorageTestbench::grpc_address() {
return absl::StrFormat("localhost:%d", grpc_port);
}
void StorageTestbench::SpawnProcess() {
if (running) return;
const auto start_child = [&] {
http_port = TryPickUnusedPort().value_or(0);
ABSL_CHECK(http_port > 0);
ABSL_LOG(INFO) << "Spawning testbench: http:
{
SubprocessOptions options{absl::GetFlag(FLAGS_testbench_binary),
{absl::StrFormat("--port=%d", http_port)}};
TENSORSTORE_CHECK_OK_AND_ASSIGN(child, SpawnSubprocess(options));
}
};
start_child();
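  // Poll the testbench until it reports its gRPC port, restarting the child
  // process if it has exited; give up after roughly 30 seconds.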
for (auto deadline = absl::Now() + absl::Seconds(30);;) {
absl::SleepFor(absl::Milliseconds(200));
if (!absl::IsUnavailable(child->Join(false).status())) {
start_child();
}
auto result =
GetDefaultHttpTransport()
->IssueRequest(
HttpRequestBuilder(
"GET", absl::StrFormat("http:
http_port))
.BuildRequest(),
IssueRequestOptions()
.SetRequestTimeout(absl::Seconds(15))
.SetConnectTimeout(absl::Seconds(15)))
.result();
if (result.ok()) {
if (result->status_code != 200) {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << *result;
} else if (!absl::SimpleAtoi(result->payload.Flatten(), &grpc_port)) {
ABSL_LOG(ERROR) << "Unexpected response from start_grpc: " << *result;
} else {
break;
}
} else {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << result.status();
}
if (absl::Now() < deadline && absl::IsUnavailable(result.status())) {
continue;
}
ABSL_LOG(FATAL) << "Failed to start testbench: " << result.status();
}
running = true;
}
StorageTestbench::~StorageTestbench() {
if (child) {
child->Kill().IgnoreError();
auto join_result = child->Join();
if (!join_result.ok()) {
ABSL_LOG(ERROR) << "Joining storage_testbench subprocess failed: "
<< join_result.status();
}
}
}
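// Creates a bucket on the running testbench via the gRPC Storage API over
// an insecure channel.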
absl::Status StorageTestbench::CreateBucket(std::string grpc_endpoint,
std::string bucket) {
google::storage::v2::CreateBucketRequest bucket_request =
tensorstore::ParseTextProtoOrDie(R"pb(
parent: 'projects/12345'
bucket: { location: 'US' storage_class: 'STANDARD' }
bucket_id: 'bucket'
predefined_acl: 'publicReadWrite'
predefined_default_object_acl: 'publicReadWrite'
)pb");
bucket_request.set_bucket_id(bucket);
google::storage::v2::Bucket bucket_response;
std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
grpc_endpoint, grpc::InsecureChannelCredentials());
if (!channel->WaitForConnected(
absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
ABSL_LOG(WARNING) << "Failed to connect to grpc endpoint after 100ms: "
<< grpc_endpoint;
}
auto stub = Storage::NewStub(std::move(channel));
grpc::ClientContext client_context;
grpc::Status status =
stub->CreateBucket(&client_context, bucket_request, &bucket_response);
return GrpcStatusToAbslStatus(status);
}
} | #include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <stddef.h>
#include <cstring>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/call_once.h"
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace kvstore = ::tensorstore::kvstore;
using ::gcs_testbench::StorageTestbench;
using ::tensorstore::KvStore;
using ::tensorstore::StorageGeneration;
namespace {
StorageTestbench& GetTestBench() {
static absl::NoDestructor<StorageTestbench> testbench;
static absl::once_flag init_once;
absl::call_once(init_once, [&]() {
testbench->SpawnProcess();
static std::string http_address = testbench->http_address();
::tensorstore::internal::SetEnv("TENSORSTORE_GCS_HTTP_URL",
http_address.c_str());
::tensorstore::internal::SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
ABSL_LOG(INFO) << "Using " << http_address;
ABSL_LOG(INFO) << "Creating bucket: "
<< StorageTestbench::CreateBucket(testbench->grpc_address(),
"test_bucket");
});
return *testbench;
}
class GcsTestbenchTest : public testing::Test {
public:
tensorstore::KvStore OpenStore(std::string path = "") {
GetTestBench();
return kvstore::Open(
{{"driver", "gcs"}, {"bucket", "test_bucket"}, {"path", path}})
.value();
}
};
TEST_F(GcsTestbenchTest, Basic) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(GcsTestbenchTest, DeletePrefix) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST_F(GcsTestbenchTest, DeleteRange) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeToEnd) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeFromBeginning) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
TEST_F(GcsTestbenchTest, List) {
auto store = OpenStore("list/");
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST_F(GcsTestbenchTest, CancellationDoesNotCrash) {
auto store = OpenStore("cancellation/");
static constexpr size_t kCount = 1000;
std::vector<std::string> keys;
keys.reserve(kCount);
for (size_t i = 0; i < kCount; ++i) {
keys.push_back(absl::StrCat(i));
}
absl::Cord value("xyzzyx");
std::vector<tensorstore::AnyFuture> futures;
futures.reserve(kCount * 2);
for (const auto& key : keys) {
futures.push_back(kvstore::Write(store, key, value));
}
for (const auto& key : keys) {
futures.push_back(kvstore::Read(store, key));
}
futures = {};
for (const auto& key : keys) {
futures.push_back(kvstore::Delete(store, key));
}
for (auto& future : futures) {
future.Wait();
}
}
TEST_F(GcsTestbenchTest, ConcurrentWrites) {
tensorstore::internal::TestConcurrentWritesOptions options;
auto store = OpenStore("concurrent_writes/");
options.get_store = [&] { return store; };
options.num_iterations = 0x3f;
tensorstore::internal::TestConcurrentWrites(options);
}
} |
622 | cpp | google/tensorstore | kvstore_server | tensorstore/kvstore/tsgrpc/kvstore_server.cc | tensorstore/kvstore/tsgrpc/kvstore_server_test.cc | #ifndef TENSORSTORE_KVSTORE_TSGRPC_KVSTORE_SERVER_H_
#define TENSORSTORE_KVSTORE_TSGRPC_KVSTORE_SERVER_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace grpc_kvstore {
class KvStoreServer {
public:
struct Spec {
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(Spec, JsonSerializationOptions,
JsonSerializationOptions);
std::vector<std::string> bind_addresses;
kvstore::Spec base;
};
static Result<KvStoreServer> Start(Spec spec,
Context context = Context::Default());
KvStoreServer();
KvStoreServer(KvStoreServer&&);
KvStoreServer& operator=(KvStoreServer&&);
~KvStoreServer();
void Wait();
int port() const;
tensorstore::span<const int> ports() const;
private:
class Impl;
std::unique_ptr<Impl> impl_;
};
}
}
#endif
#include "tensorstore/kvstore/tsgrpc/kvstore_server.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/server_callback.h"
#include "grpcpp/support/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/grpc/server_credentials.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/tsgrpc/common.h"
#include "tensorstore/kvstore/tsgrpc/common.pb.h"
#include "tensorstore/kvstore/tsgrpc/handler_template.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
#include "tensorstore/util/span.h"
using ::grpc::CallbackServerContext;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore_grpc::EncodeGenerationAndTimestamp;
using ::tensorstore_grpc::Handler;
using ::tensorstore_grpc::StreamHandler;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
using ::tensorstore_grpc::kvstore::grpc_gen::KvStoreService;
namespace jb = ::tensorstore::internal_json_binding;
namespace tensorstore {
namespace {
auto& read_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/grpc_server/read", "KvStoreService::Read calls");
auto& write_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/grpc_server/write", "KvStoreService::Write calls");
auto& delete_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/grpc_server/delete", "KvStoreService::Delete calls");
auto& list_metric = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/grpc_server/list", "KvStoreService::List calls");
ABSL_CONST_INIT internal_log::VerboseFlag verbose_logging("tsgrpc_kvstore");
class ReadHandler final : public Handler<ReadRequest, ReadResponse> {
using Base = Handler<ReadRequest, ReadResponse>;
public:
ReadHandler(CallbackServerContext* grpc_context, const Request* request,
Response* response, KvStore kvstore)
: Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}
void Run() {
ABSL_LOG_IF(INFO, verbose_logging)
<< "ReadHandler " << ConciseDebugString(*request());
kvstore::ReadOptions options{};
options.generation_conditions.if_equal.value =
request()->generation_if_equal();
options.generation_conditions.if_not_equal.value =
request()->generation_if_not_equal();
if (request()->has_byte_range()) {
options.byte_range.inclusive_min =
request()->byte_range().inclusive_min();
options.byte_range.exclusive_max =
request()->byte_range().exclusive_max();
if (!options.byte_range.SatisfiesInvariants()) {
Finish(absl::InvalidArgumentError("Invalid byte range"));
return;
}
}
if (request()->has_staleness_bound()) {
TENSORSTORE_ASSIGN_OR_RETURN(
options.staleness_bound,
internal::ProtoToAbslTime(request()->staleness_bound()), Finish(_));
}
internal::IntrusivePtr<ReadHandler> self{this};
future_ =
PromiseFuturePair<void>::Link(
[self = std::move(self)](tensorstore::Promise<void> promise,
auto read_result) {
if (!promise.result_needed()) return;
promise.SetResult(self->HandleResult(read_result.result()));
},
tensorstore::kvstore::Read(kvstore_, request()->key(), options))
.future;
}
void OnCancel() final {
if (future_.ready()) return;
future_ = {};
Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
}
absl::Status HandleResult(const Result<kvstore::ReadResult>& result) {
auto status = result.status();
if (status.ok()) {
auto& r = result.value();
response()->set_state(static_cast<ReadResponse::State>(r.state));
EncodeGenerationAndTimestamp(r.stamp, response());
if (r.has_value()) {
response()->set_value(r.value);
}
}
Finish(status);
return status;
}
private:
KvStore kvstore_;
Future<void> future_;
};
class WriteHandler final : public Handler<WriteRequest, WriteResponse> {
using Base = Handler<WriteRequest, WriteResponse>;
public:
WriteHandler(CallbackServerContext* grpc_context, const Request* request,
Response* response, KvStore kvstore)
: Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}
void Run() {
ABSL_LOG_IF(INFO, verbose_logging)
<< "WriteHandler " << ConciseDebugString(*request());
tensorstore::kvstore::WriteOptions options{};
options.generation_conditions.if_equal.value =
request()->generation_if_equal();
internal::IntrusivePtr<WriteHandler> self{this};
future_ =
PromiseFuturePair<void>::Link(
[self = std::move(self)](Promise<void> promise, auto write_result) {
if (!promise.result_needed()) return;
promise.SetResult(self->HandleResult(write_result.result()));
},
kvstore::Write(kvstore_, request()->key(),
absl::Cord(request()->value()), options))
.future;
}
void OnCancel() final {
if (future_.ready()) return;
future_ = {};
Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
}
absl::Status HandleResult(
const tensorstore::Result<TimestampedStorageGeneration>& result) {
auto status = result.status();
if (status.ok()) {
EncodeGenerationAndTimestamp(result.value(), response());
}
Finish(status);
return status;
}
private:
KvStore kvstore_;
Future<void> future_;
};
class DeleteHandler final : public Handler<DeleteRequest, DeleteResponse> {
using Base = Handler<DeleteRequest, DeleteResponse>;
public:
DeleteHandler(CallbackServerContext* grpc_context, const Request* request,
Response* response, KvStore kvstore)
: Base(grpc_context, request, response), kvstore_(std::move(kvstore)) {}
void Run() {
ABSL_LOG_IF(INFO, verbose_logging)
<< "DeleteHandler " << ConciseDebugString(*request());
internal::IntrusivePtr<DeleteHandler> self{this};
auto callback = [self = std::move(self)](Promise<void> promise,
auto del_result) {
if (!promise.result_needed()) return;
promise.SetResult(self->HandleResult(del_result.result()));
};
if (request()->has_range()) {
future_ = PromiseFuturePair<void>::Link(
std::move(callback),
kvstore::DeleteRange(
kvstore_, KeyRange(request()->range().inclusive_min(),
request()->range().exclusive_max())))
.future;
} else if (!request()->key().empty()) {
kvstore::WriteOptions options{};
options.generation_conditions.if_equal.value =
request()->generation_if_equal();
future_ =
PromiseFuturePair<void>::Link(
std::move(callback),
tensorstore::kvstore::Delete(kvstore_, request()->key(), options))
.future;
} else {
Finish(absl::InvalidArgumentError("Invalid request"));
}
}
void OnCancel() final {
if (future_.ready()) return;
future_ = {};
Finish(::grpc::Status(::grpc::StatusCode::CANCELLED, ""));
}
absl::Status HandleResult(const tensorstore::Result<void>& result) {
auto status = result.status();
Finish(status);
return status;
}
absl::Status HandleResult(
const tensorstore::Result<TimestampedStorageGeneration>& result) {
auto status = result.status();
if (status.ok()) {
EncodeGenerationAndTimestamp(result.value(), response());
}
Finish(status);
return status;
}
private:
tensorstore::KvStore kvstore_;
tensorstore::Future<void> future_;
};
class ListHandler final : public StreamHandler<ListRequest, ListResponse> {
using Base = StreamHandler<ListRequest, ListResponse>;
public:
ListHandler(CallbackServerContext* grpc_context, const Request* request,
tensorstore::KvStore kvstore)
: Base(grpc_context, request),
kvstore_(std::move(kvstore)),
estimated_size_(0),
cancel_([] {}) {}
void Run();
void OnCancel() final { cancel_(); }
void OnWriteDone(bool ok) final {
absl::MutexLock l(&mu_);
in_flight_msg_ = nullptr;
MaybeWrite();
}
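  // Writes the accumulated ListResponse when no message is in flight,
  // batching entries until roughly 16 KiB has accumulated and finishing the
  // stream once listing completes.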
void MaybeWrite() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (in_flight_msg_ != nullptr) return;
if (!current_) return;
if (done_) {
if (!status_.ok()) {
current_ = nullptr;
Finish(status_);
} else if (current_->entry().empty()) {
current_ = nullptr;
Finish(grpc::Status::OK);
} else {
in_flight_msg_ = std::move(current_);
StartWriteAndFinish(in_flight_msg_.get(), {}, grpc::Status::OK);
}
return;
}
constexpr size_t kTargetSize = 16 * 1024;
if (estimated_size_ < kTargetSize) return;
in_flight_msg_ = std::move(current_);
StartWrite(in_flight_msg_.get());
current_ = std::make_unique<ListResponse>();
estimated_size_ = 0;
}
[[maybe_unused]] friend void set_starting(
internal::IntrusivePtr<ListHandler>& self, AnyCancelReceiver cancel) {
absl::MutexLock l(&self->mu_);
self->cancel_ = std::move(cancel);
self->done_ = false;
self->current_ = std::make_unique<ListResponse>();
self->estimated_size_ = 0;
}
[[maybe_unused]] friend void set_value(
internal::IntrusivePtr<ListHandler>& self, ListEntry entry) {
absl::MutexLock l(&self->mu_);
auto* e = self->current_->add_entry();
e->set_key(entry.key);
e->set_size(entry.size);
self->estimated_size_ += entry.key.size();
self->MaybeWrite();
}
[[maybe_unused]] friend void set_done(
internal::IntrusivePtr<ListHandler>& self) {
self->cancel_ = [] {};
}
[[maybe_unused]] friend void set_error(
internal::IntrusivePtr<ListHandler>& self, absl::Status s) {
absl::MutexLock l(&self->mu_);
self->cancel_ = [] {};
self->status_ = s;
}
[[maybe_unused]] friend void set_stopping(
internal::IntrusivePtr<ListHandler>& self) {
absl::MutexLock l(&self->mu_);
self->done_ = true;
self->MaybeWrite();
}
private:
tensorstore::KvStore kvstore_;
absl::Mutex mu_;
absl::Status status_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<ListResponse> current_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<ListResponse> in_flight_msg_ ABSL_GUARDED_BY(mu_);
size_t estimated_size_ ABSL_GUARDED_BY(mu_);
tensorstore::AnyCancelReceiver cancel_;
std::atomic<bool> done_{true};
};
void ListHandler::Run() {
ABSL_LOG_IF(INFO, verbose_logging)
<< "ListHandler " << ConciseDebugString(*request());
tensorstore::kvstore::ListOptions options;
options.range = tensorstore::KeyRange(request()->range().inclusive_min(),
request()->range().exclusive_max());
options.strip_prefix_length = request()->strip_prefix_length();
if (request()->has_staleness_bound()) {
TENSORSTORE_ASSIGN_OR_RETURN(
options.staleness_bound,
internal::ProtoToAbslTime(request()->staleness_bound()), Finish(_));
}
internal::IntrusivePtr<ListHandler> self{this};
tensorstore::execution::submit(
tensorstore::kvstore::List(self->kvstore_, options), self);
}
}
namespace grpc_kvstore {
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
KvStoreServer::Spec,
jb::Object(jb::Member("base", jb::Projection<&KvStoreServer::Spec::base>()),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->base.path);
return absl::OkStatus();
}),
jb::Member("bind_addresses",
jb::Projection<&KvStoreServer::Spec::bind_addresses>(
jb::DefaultInitializedValue()))));
class KvStoreServer::Impl final : public KvStoreService::CallbackService {
public:
Impl(KvStore kvstore) : kvstore_(std::move(kvstore)) {}
::grpc::ServerUnaryReactor* Read(::grpc::CallbackServerContext* context,
const ReadRequest* request,
ReadResponse* response) override {
read_metric.Increment();
internal::IntrusivePtr<ReadHandler> handler(
new ReadHandler(context, request, response, kvstore_));
assert(handler->use_count() == 2);
handler->Run();
assert(handler->use_count() > 0);
if (handler->use_count() == 1) return nullptr;
return handler.get();
}
::grpc::ServerUnaryReactor* Write(::grpc::CallbackServerContext* context,
const WriteRequest* request,
WriteResponse* response) override {
write_metric.Increment();
internal::IntrusivePtr<WriteHandler> handler(
new WriteHandler(context, request, response, kvstore_));
assert(handler->use_count() == 2);
handler->Run();
assert(handler->use_count() > 0);
if (handler->use_count() == 1) return nullptr;
return handler.get();
}
::grpc::ServerUnaryReactor* Delete(::grpc::CallbackServerContext* context,
const DeleteRequest* request,
DeleteResponse* response) override {
delete_metric.Increment();
internal::IntrusivePtr<DeleteHandler> handler(
new DeleteHandler(context, request, response, kvstore_));
assert(handler->use_count() == 2);
handler->Run();
assert(handler->use_count() > 0);
if (handler->use_count() == 1) return nullptr;
return handler.get();
}
::grpc::ServerWriteReactor< ::tensorstore_grpc::kvstore::ListResponse>* List(
::grpc::CallbackServerContext* context,
const ListRequest* request) override {
list_metric.Increment();
internal::IntrusivePtr<ListHandler> handler(
new ListHandler(context, request, kvstore_));
assert(handler->use_count() == 2);
handler->Run();
if (handler->use_count() == 1) return nullptr;
return handler.get();
}
const KvStore& kvstore() const { return kvstore_; }
private:
friend class KvStoreServer;
KvStore kvstore_;
std::vector<int> listening_ports_;
std::unique_ptr<grpc::Server> server_;
};
KvStoreServer::KvStoreServer() = default;
KvStoreServer::~KvStoreServer() = default;
KvStoreServer::KvStoreServer(KvStoreServer&&) = default;
KvStoreServer& KvStoreServer::operator=(KvStoreServer&&) = default;
tensorstore::span<const int> KvStoreServer::ports() const {
return impl_->listening_ports_;
}
int KvStoreServer::port() const { return impl_->listening_ports_.front(); }
void KvStoreServer::Wait() { impl_->server_->Wait(); }
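// Opens the base kvstore, registers the callback service, and binds each
// requested address, defaulting to an ephemeral port on all interfaces.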
tensorstore::Result<KvStoreServer> KvStoreServer::Start(Spec spec,
Context context) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto kv, tensorstore::kvstore::Open(spec.base, context).result());
auto impl = std::make_unique<KvStoreServer::Impl>(std::move(kv));
auto creds = context.GetResource<tensorstore::GrpcServerCredentials>()
.value()
->GetCredentials();
grpc::ServerBuilder builder;
builder.RegisterService(impl.get());
if (spec.bind_addresses.empty()) {
spec.bind_addresses.push_back("[::]:0");
}
impl->listening_ports_.resize(spec.bind_addresses.size());
for (size_t i = 0; i < spec.bind_addresses.size(); ++i) {
builder.AddListeningPort(spec.bind_addresses[i], creds,
&impl->listening_ports_[i]);
}
impl->server_ = builder.BuildAndStart();
KvStoreServer server;
server.impl_ = std::move(impl);
return server;
}
}
} | #include "tensorstore/kvstore/tsgrpc/kvstore_server.h"
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::KeyRange;
using ::tensorstore::grpc_kvstore::KvStoreServer;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
class KvStoreSingleton {
public:
KvStoreSingleton() : ctx_(tensorstore::Context::Default()) {
server_ = KvStoreServer::Start(KvStoreServer::Spec::FromJson(
{
{"bind_addresses", {"localhost:0"}},
{"base", "memory:
})
.value(),
ctx_)
.value();
address_ = absl::StrFormat("localhost:%d", server_.port());
}
const std::string& address() const { return address_; }
private:
tensorstore::Context ctx_;
KvStoreServer server_;
std::string address_;
};
const KvStoreSingleton& GetSingleton() {
static const KvStoreSingleton* const kSingleton = new KvStoreSingleton();
return *kSingleton;
}
class KvStoreTest : public testing::Test {
public:
const std::string& address() const { return GetSingleton().address(); }
};
TEST_F(KvStoreTest, Basic) {
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
{"address", address()},
{"path", "basic/"}},
context)
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(KvStoreTest, DeleteRange) {
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
{"address", address()},
{"path", "delete_range/"}},
context)
.result());
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::DeleteRange(store, KeyRange::Prefix("a/c")));
EXPECT_EQ("xyz", kvstore::Read(store, "a/b").value().value);
EXPECT_EQ("xyz", kvstore::Read(store, "a/d").value().value);
EXPECT_THAT(kvstore::Read(store, "a/c/x").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(kvstore::Read(store, "a/c/y").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(kvstore::Read(store, "a/c/z/e").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(kvstore::Read(store, "a/c/z/f").result(),
MatchesKvsReadResultNotFound());
}
TEST_F(KvStoreTest, List) {
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::kvstore::Open({{"driver", "tsgrpc_kvstore"},
{"address", address()},
{"path", "list/"}},
context)
.result());
{
std::vector<std::string> log;
absl::Notification notification;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
"set_stopping"));
}
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/b", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/d", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/x", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/y", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/e", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/c/z/f", absl::Cord("xyz")));
{
std::vector<std::string> log;
absl::Notification notification;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a/d", "set_value: a/c/z/f",
"set_value: a/c/y", "set_value: a/c/z/e", "set_value: a/c/x",
"set_value: a/b", "set_done", "set_stopping"));
}
{
std::vector<std::string> log;
absl::Notification notification;
tensorstore::execution::submit(
kvstore::List(store, {KeyRange::Prefix("a/c/")}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a/c/z/f",
"set_value: a/c/y", "set_value: a/c/z/e",
"set_value: a/c/x", "set_done", "set_stopping"));
}
{
std::vector<std::string> log;
absl::Notification notification;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::CancelOnStartingReceiver{{&log}}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_done",
"set_stopping"));
}
{
std::vector<std::string> log;
absl::Notification notification;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::CancelAfterNReceiver<2>{{&log}}});
notification.WaitForNotification();
EXPECT_THAT(log,
::testing::ElementsAre(
"set_starting",
::testing::AnyOf("set_value: a/d", "set_value: a/c/z/f",
"set_value: a/c/y", "set_value: a/c/z/e",
"set_value: a/c/x", "set_value: a/b"),
"set_done", "set_stopping"));
}
}
} |
623 | cpp | google/tensorstore | memory_key_value_store | tensorstore/kvstore/memory/memory_key_value_store.cc | tensorstore/kvstore/memory/memory_key_value_store_test.cc | #ifndef TENSORSTORE_KVSTORE_MEMORY_MEMORY_KEY_VALUE_STORE_H_
#define TENSORSTORE_KVSTORE_MEMORY_MEMORY_KEY_VALUE_STORE_H_
#include "tensorstore/kvstore/kvstore.h"
namespace tensorstore {
kvstore::DriverPtr GetMemoryKeyValueStore(bool atomic = true);
}
#endif
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::kvstore::SupportedFeatures;
TimestampedStorageGeneration GenerationNow(StorageGeneration generation) {
return TimestampedStorageGeneration{std::move(generation), absl::Now()};
}
struct StoredKeyValuePairs
: public internal::AtomicReferenceCount<StoredKeyValuePairs> {
using Ptr = internal::IntrusivePtr<StoredKeyValuePairs>;
struct ValueWithGenerationNumber {
absl::Cord value;
uint64_t generation_number;
StorageGeneration generation() const {
return StorageGeneration::FromUint64(generation_number);
}
};
using Map = absl::btree_map<std::string, ValueWithGenerationNumber>;
std::pair<Map::iterator, Map::iterator> Find(const std::string& inclusive_min,
const std::string& exclusive_max)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return {values.lower_bound(inclusive_min),
exclusive_max.empty() ? values.end()
: values.lower_bound(exclusive_max)};
}
std::pair<Map::iterator, Map::iterator> Find(const KeyRange& range)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return Find(range.inclusive_min, range.exclusive_max);
}
absl::Mutex mutex;
uint64_t next_generation_number ABSL_GUARDED_BY(mutex) = 0;
Map values ABSL_GUARDED_BY(mutex);
};
struct MemoryKeyValueStoreResource
: public internal::ContextResourceTraits<MemoryKeyValueStoreResource> {
constexpr static char id[] = "memory_key_value_store";
struct Spec {};
using Resource = StoredKeyValuePairs::Ptr;
static Spec Default() { return {}; }
static constexpr auto JsonBinder() { return jb::Object(); }
static Result<Resource> Create(
Spec, internal::ContextResourceCreationContext context) {
return StoredKeyValuePairs::Ptr(new StoredKeyValuePairs);
}
static Spec GetSpec(const Resource&,
const internal::ContextSpecBuilder& builder) {
return {};
}
};
const internal::ContextResourceRegistration<MemoryKeyValueStoreResource>
resource_registration;
struct MemoryDriverSpecData {
Context::Resource<MemoryKeyValueStoreResource> memory_key_value_store;
bool atomic = true;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.memory_key_value_store, x.atomic);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(
MemoryKeyValueStoreResource::id,
jb::Projection<&MemoryDriverSpecData::memory_key_value_store>()),
jb::Member("atomic", jb::Projection<&MemoryDriverSpecData::atomic>(
jb::DefaultValue([](auto* y) { *y = true; }))));
};
class MemoryDriverSpec
: public internal_kvstore::RegisteredDriverSpec<MemoryDriverSpec,
MemoryDriverSpecData> {
public:
static constexpr char id[] = "memory";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
return tensorstore::StrCat(id, "://", internal::PercentEncodeUriPath(path));
}
};
class MemoryDriver
: public internal_kvstore::RegisteredDriver<MemoryDriver,
MemoryDriverSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
Future<const void> DeleteRange(KeyRange range) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override;
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) override;
class TransactionNode;
StoredKeyValuePairs& data() { return **spec_.memory_key_value_store; }
absl::Status GetBoundSpecData(MemoryDriverSpecData& spec) const {
spec = spec_;
return absl::Status();
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
SpecData spec_;
};
Future<kvstore::DriverPtr> MemoryDriverSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<MemoryDriver>();
driver->spec_ = data_;
return driver;
}
using BufferedReadModifyWriteEntry =
internal_kvstore::AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry;
class MemoryDriver::TransactionNode
: public internal_kvstore::AtomicTransactionNode {
using Base = internal_kvstore::AtomicTransactionNode;
public:
using Base::Base;
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override
ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (!single_phase_mutation.remaining_entries_.HasError()) {
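// Commit point: under the store mutex, validate every buffered entry's
// generation condition at a single commit time.  On any mismatch, writeback
// is retried with that commit time; otherwise all mutations are applied and
// the atomic commit is reported as successful.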
auto& data = static_cast<MemoryDriver&>(*this->driver()).data();
TimestampedStorageGeneration generation;
UniqueWriterLock lock(data.mutex);
absl::Time commit_time = absl::Now();
if (!ValidateEntryConditions(data, single_phase_mutation, commit_time)) {
lock.unlock();
this->RetryAtomicWriteback(commit_time);
return;
}
ApplyMutation(data, single_phase_mutation, commit_time);
lock.unlock();
this->AtomicCommitWritebackSuccess();
} else {
internal_kvstore::WritebackError(single_phase_mutation);
}
MultiPhaseMutation::AllEntriesDone(single_phase_mutation);
}
static bool ValidateEntryConditions(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
bool validated = true;
for (auto& entry : single_phase_mutation.entries_) {
if (!ValidateEntryConditions(data, entry, commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
internal_kvstore::MutationEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
if (entry.entry_type() == kReadModifyWrite) {
return ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(entry), commit_time);
}
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
bool validated = true;
for (auto& deleted_entry : dr_entry.superseded_) {
if (!ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(deleted_entry),
commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
BufferedReadModifyWriteEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
auto& stamp = entry.stamp();
auto if_equal = StorageGeneration::Clean(stamp.generation);
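// An unconditional write carries an unknown `if_equal` generation and always
// validates; otherwise the entry remains valid only if the stored generation
// (or the continued absence of the key) still matches what was observed when
// the read-modify-write was buffered.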
if (StorageGeneration::IsUnknown(if_equal)) {
assert(stamp.time == absl::InfiniteFuture());
return true;
}
auto it = data.values.find(entry.key_);
if (it == data.values.end()) {
if (StorageGeneration::IsNoValue(if_equal)) {
stamp.time = commit_time;
return true;
}
} else if (if_equal == it->second.generation()) {
stamp.time = commit_time;
return true;
}
return false;
}
static void ApplyMutation(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(data.mutex) {
for (auto& entry : single_phase_mutation.entries_) {
if (entry.entry_type() == kReadModifyWrite) {
auto& rmw_entry = static_cast<BufferedReadModifyWriteEntry&>(entry);
auto& stamp = rmw_entry.stamp();
stamp.time = commit_time;
auto value_state = rmw_entry.value_state_;
if (!StorageGeneration::IsDirty(stamp.generation)) {
} else if (value_state == ReadResult::kMissing) {
data.values.erase(rmw_entry.key_);
stamp.generation = StorageGeneration::NoValue();
} else {
assert(value_state == ReadResult::kValue);
auto& v = data.values[rmw_entry.key_];
v.generation_number = data.next_generation_number++;
v.value = std::move(rmw_entry.value_);
stamp.generation = v.generation();
}
} else {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto it_range = data.Find(dr_entry.key_, dr_entry.exclusive_max_);
data.values.erase(it_range.first, it_range.second);
}
}
}
};
Future<ReadResult> MemoryDriver::Read(Key key, ReadOptions options) {
auto& data = this->data();
absl::ReaderMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
return ReadResult::Missing(GenerationNow(StorageGeneration::NoValue()));
}
auto stamp = GenerationNow(it->second.generation());
if (!options.generation_conditions.Matches(it->second.generation())) {
return ReadResult::Unspecified(std::move(stamp));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range, options.byte_range.Validate(it->second.value.size()));
return ReadResult::Value(internal::GetSubCord(it->second.value, byte_range),
std::move(stamp));
}
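// Conditional write: `value == std::nullopt` requests deletion, and the
// operation only takes effect if the key's current generation (or its
// absence) satisfies `options.generation_conditions`; a mismatch is signaled
// by returning `StorageGeneration::Unknown()`.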
Future<TimestampedStorageGeneration> MemoryDriver::Write(
Key key, std::optional<Value> value, WriteOptions options) {
using ValueWithGenerationNumber =
StoredKeyValuePairs::ValueWithGenerationNumber;
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
if (!options.generation_conditions.MatchesNoValue()) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
return GenerationNow(StorageGeneration::NoValue());
}
it = values
.emplace(std::move(key),
ValueWithGenerationNumber{std::move(*value),
data.next_generation_number++})
.first;
return GenerationNow(it->second.generation());
}
if (!options.generation_conditions.Matches(it->second.generation())) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
values.erase(it);
return GenerationNow(StorageGeneration::NoValue());
}
it->second.generation_number = data.next_generation_number++;
it->second.value = std::move(*value);
return GenerationNow(it->second.generation());
}
Future<const void> MemoryDriver::DeleteRange(KeyRange range) {
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
if (!range.empty()) {
auto it_range = data.Find(range);
data.values.erase(it_range.first, it_range.second);
}
return absl::OkStatus();
}
void MemoryDriver::ListImpl(ListOptions options, ListReceiver receiver) {
auto& data = this->data();
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] {
cancelled.store(true, std::memory_order_relaxed);
});
std::vector<ListEntry> entries;
{
absl::ReaderMutexLock lock(&data.mutex);
auto it_range = data.Find(options.range);
for (auto it = it_range.first; it != it_range.second; ++it) {
if (cancelled.load(std::memory_order_relaxed)) break;
std::string_view key = it->first;
entries.push_back(ListEntry{
std::string(
key.substr(std::min(options.strip_prefix_length, key.size()))),
ListEntry::checked_size(it->second.value.size()),
});
}
}
for (auto& entry : entries) {
if (cancelled.load(std::memory_order_relaxed)) break;
execution::set_value(receiver, std::move(entry));
}
execution::set_done(receiver);
execution::set_stopping(receiver);
}
absl::Status MemoryDriver::ReadModifyWrite(
internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
ReadModifyWriteSource& source) {
if (!spec_.atomic) {
return Driver::ReadModifyWrite(transaction, phase, std::move(key), source);
}
return internal_kvstore::AddReadModifyWrite<TransactionNode>(
this, transaction, phase, std::move(key), source);
}
absl::Status MemoryDriver::TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) {
if (!spec_.atomic) {
return Driver::TransactionalDeleteRange(transaction, std::move(range));
}
return internal_kvstore::AddDeleteRange<TransactionNode>(this, transaction,
std::move(range));
}
Result<kvstore::Spec> ParseMemoryUrl(std::string_view url) {
auto parsed = internal::ParseGenericUri(url);
assert(parsed.scheme == tensorstore::MemoryDriverSpec::id);
if (!parsed.query.empty()) {
return absl::InvalidArgumentError("Query string not supported");
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
auto driver_spec = internal::MakeIntrusivePtr<MemoryDriverSpec>();
driver_spec->data_.memory_key_value_store =
Context::Resource<MemoryKeyValueStoreResource>::DefaultSpec();
return {std::in_place, std::move(driver_spec),
internal::PercentDecode(parsed.authority_and_path)};
}
}
kvstore::DriverPtr GetMemoryKeyValueStore(bool atomic) {
auto ptr = internal::MakeIntrusivePtr<MemoryDriver>();
ptr->spec_.memory_key_value_store =
Context::Default().GetResource<MemoryKeyValueStoreResource>().value();
ptr->spec_.atomic = atomic;
return ptr;
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::MemoryDriver)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::MemoryDriverSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{tensorstore::MemoryDriverSpec::id,
tensorstore::ParseMemoryUrl};
} | #include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::serialization::SerializationRoundTrip;
TEST(MemoryKeyValueStoreTest, Basic) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(MemoryKeyValueStoreTest, DeletePrefix) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRange) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeToEnd) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeFromBeginning) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
#if 0
TEST(MemoryKeyValueStoreTest, CopyRange) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreCopyRange(store);
}
#endif
TEST(MemoryKeyValueStoreTest, List) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST(MemoryKeyValueStoreTest, Open) {
auto context = Context::Default();
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "memory"}}, context).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "key", absl::Cord("value")));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open({{"driver", "memory"}}, context).result());
EXPECT_THAT(kvstore::Read(store2, "key").result(),
MatchesKvsReadResult(absl::Cord("value")));
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto other_context, Context::FromJson({{"memory_key_value_store",
::nlohmann::json::object_t{}}},
context));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store3,
kvstore::Open({{"driver", "memory"}}, other_context).result());
EXPECT_THAT(kvstore::Read(store3, "key").result(),
MatchesKvsReadResultNotFound());
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "memory"}}, context).result());
EXPECT_EQ("value", kvstore::Read(store, "key").value().value);
}
}
TEST(MemoryKeyValueStoreTest, ListWithPath) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "memory"}, {"path", "p/"}}, context).result());
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST(MemoryKeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {
{"driver", "memory"},
};
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(MemoryKeyValueStoreTest, SpecRoundtripWithContextSpec) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.spec_request_options.Set(tensorstore::unbind_context);
options.full_spec = {
{"driver", "memory"},
{"memory_key_value_store", "memory_key_value_store#a"},
{"context",
{
{"memory_key_value_store#a", ::nlohmann::json::object_t()},
}},
};
options.check_data_persists = false;
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(MemoryKeyValueStoreTest, InvalidSpec) {
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "memory"}, {"extra", "key"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(MemoryKeyValueStoreTest, BoundSpec) {
auto context = tensorstore::Context::Default();
::nlohmann::json json_spec{{"driver", "memory"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
kvstore::Spec::FromJson(json_spec));
TENSORSTORE_ASSERT_OK(spec.BindContext(context));
std::string bound_spec_cache_key;
tensorstore::internal::EncodeCacheKey(&bound_spec_cache_key, spec.driver);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store, kvstore::Open(spec).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_spec,
store.spec(tensorstore::retain_context));
std::string store_cache_key;
tensorstore::internal::EncodeCacheKey(&store_cache_key, store.driver);
EXPECT_EQ(bound_spec_cache_key, store_cache_key);
new_spec.StripContext();
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{false}),
::testing::Optional(json_spec));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(json_spec, context).result());
std::string store2_cache_key;
tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
EXPECT_EQ(store_cache_key, store2_cache_key);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2,
kvstore::Open(
{{"driver", "memory"},
{"context",
{{"memory_key_value_store#a", "memory_key_value_store"}}},
{"memory_key_value_store", "memory_key_value_store#a"}},
context)
.result());
std::string store2_cache_key;
tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
EXPECT_EQ(store_cache_key, store2_cache_key);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
kvstore::Open(json_spec).result());
std::string store3_cache_key;
tensorstore::internal::EncodeCacheKey(&store3_cache_key, store3.driver);
EXPECT_NE(store_cache_key, store3_cache_key);
}
}
TEST(MemoryKeyValueStoreTest, OpenCache) {
auto context = tensorstore::Context::Default();
::nlohmann::json json_spec{{"driver", "memory"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
kvstore::Open(json_spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
kvstore::Open(json_spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
kvstore::Open(json_spec).result());
EXPECT_EQ(store1.driver.get(), store2.driver.get());
EXPECT_NE(store1.driver.get(), store3.driver.get());
std::string cache_key1, cache_key3;
tensorstore::internal::EncodeCacheKey(&cache_key1, store1.driver);
tensorstore::internal::EncodeCacheKey(&cache_key3, store3.driver);
EXPECT_NE(cache_key1, cache_key3);
}
TEST(MemoryKeyValueStoreTest, ContextBinding) {
auto context1 = Context::Default();
auto context2 = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_spec, kvstore::Spec::FromJson({{"driver", "memory"}}));
auto base_spec1 = base_spec;
TENSORSTORE_ASSERT_OK(base_spec1.Set(context1));
EXPECT_THAT(
base_spec1.ToJson(),
::testing::Optional(MatchesJson(
{{"driver", "memory"},
{"context",
{{"memory_key_value_store", ::nlohmann::json::object_t()}}}})));
auto base_spec2 = base_spec;
TENSORSTORE_ASSERT_OK(base_spec2.Set(context2));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
kvstore::Open(base_spec, context1).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
kvstore::Open(base_spec, context2).result());
ASSERT_NE(store1.driver, store2.driver);
EXPECT_THAT(kvstore::Open(base_spec1).result(), ::testing::Optional(store1));
EXPECT_THAT(kvstore::Open(base_spec2).result(), ::testing::Optional(store2));
auto base_spec3 = base_spec1;
TENSORSTORE_ASSERT_OK(base_spec3.Set(context2));
EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store1));
TENSORSTORE_ASSERT_OK(base_spec3.Set(tensorstore::strip_context, context2));
EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store2));
}
TEST(MemoryKeyValueStoreTest, SpecSerialization) {
::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
kvstore::Spec::FromJson(json_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
SerializationRoundTrip(spec));
EXPECT_THAT(spec_roundtripped.ToJson(),
::testing::Optional(MatchesJson(json_spec)));
}
TEST(MemoryKeyValueStoreTest, KvStoreSerialization) {
::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open(json_spec).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_roundtripped,
SerializationRoundTrip(store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
store_roundtripped.spec());
EXPECT_THAT(spec_roundtripped.ToJson(),
::testing::Optional(MatchesJson(json_spec)));
}
TEST(MemoryKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip({{"driver", "memory"}},
"memory:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "memory"}, {"path", "abc/"}}, "memory:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "memory"}, {"path", "abc def/"}}, "memory:
}
TEST(MemoryKeyValueStoreTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("memory:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("memory:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
}
} |
624 | cpp | google/tensorstore | box_difference | tensorstore/internal/box_difference.cc | tensorstore/internal/box_difference_test.cc | #ifndef TENSORSTORE_INTERNAL_BOX_DIFFERENCE_H_
#define TENSORSTORE_INTERNAL_BOX_DIFFERENCE_H_
#include <limits>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace tensorstore {
namespace internal {
class BoxDifference {
public:
BoxDifference(BoxView<> outer, BoxView<> inner);
DimensionIndex rank() const { return outer_.rank(); }
Index num_sub_boxes() const { return num_sub_boxes_; }
void GetSubBox(Index sub_box_index, MutableBoxView<> out) const;
private:
BoxView<> outer_;
BoxView<> inner_;
Index num_sub_boxes_;
};
}
}
#endif
#include "tensorstore/internal/box_difference.h"
#include <cassert>
#include <limits>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/integer_overflow.h"
namespace tensorstore {
namespace internal {
namespace {
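// Counts the sub-boxes of `outer - inner`.  Along each dimension the outer
// interval contributes the intersection plus an optional part before and an
// optional part after the inner interval; the product of the per-dimension
// part counts, minus the single all-intersection box, is the number of
// sub-boxes.  If any dimension has an empty intersection, the difference is
// just `outer` itself (one box).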
Index GetNumSubtractionSubBoxes(BoxView<> outer, BoxView<> inner) {
assert(outer.rank() == inner.rank());
const DimensionIndex rank = outer.rank();
Index total_count = 1;
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer[i];
IndexInterval inner_interval = inner[i];
Index num_parts = 1;
if (Intersect(outer_interval, inner_interval).empty()) {
return 1;
}
if (outer_interval.inclusive_min() < inner_interval.inclusive_min()) {
++num_parts;
}
if (outer_interval.inclusive_max() > inner_interval.inclusive_max()) {
++num_parts;
}
total_count *= num_parts;
}
return total_count - 1;
}
}
BoxDifference::BoxDifference(BoxView<> outer, BoxView<> inner)
: outer_(outer),
inner_(inner),
num_sub_boxes_(GetNumSubtractionSubBoxes(outer, inner)) {}
void BoxDifference::GetSubBox(Index sub_box_index, MutableBoxView<> out) const {
const DimensionIndex rank = out.rank();
assert(rank == outer_.rank());
assert(sub_box_index >= 0 && sub_box_index < num_sub_boxes_);
++sub_box_index;
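// Decode `sub_box_index` as a mixed-radix number over the per-dimension part
// counts: digit 0 selects the intersection, the remaining digits select the
// portions before/after the inner interval.  The increment above skips the
// all-zero digit combination, which would be the all-intersection box and is
// not part of the difference.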
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer_[i];
IndexInterval inner_interval = inner_[i];
Index num_parts = 1;
IndexInterval intersection = Intersect(outer_interval, inner_interval);
if (intersection.empty()) {
out.DeepAssign(outer_);
return;
}
const bool has_before =
outer_interval.inclusive_min() < inner_interval.inclusive_min();
const bool has_after =
outer_interval.inclusive_max() > inner_interval.inclusive_max();
if (has_before) ++num_parts;
if (has_after) ++num_parts;
const Index part_i = sub_box_index % num_parts;
switch (part_i) {
case 0:
out[i] = intersection;
break;
case 1:
if (has_before) {
out[i] = IndexInterval::UncheckedHalfOpen(
outer_interval.inclusive_min(), inner_interval.inclusive_min());
break;
}
[[fallthrough]];
case 2:
out[i] = IndexInterval::UncheckedHalfOpen(
inner_interval.exclusive_max(), outer_interval.exclusive_max());
break;
}
sub_box_index /= num_parts;
}
}
}
} | #include "tensorstore/internal/box_difference.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::internal::BoxDifference;
std::vector<Box<>> Subtract(BoxView<> outer, BoxView<> inner) {
BoxDifference difference(outer, inner);
Index count = difference.num_sub_boxes();
std::vector<Box<>> boxes(count);
for (Index i = 0; i < count; ++i) {
auto& out = boxes[i];
out.set_rank(outer.rank());
difference.GetSubBox(i, out);
}
return boxes;
}
TEST(BoxDifferenceTest, RankZero) {
EXPECT_THAT(Subtract(BoxView<>(), BoxView<>()),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneEmptyResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({1}, {5})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneFullResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({6}, {5})),
::testing::UnorderedElementsAre(BoxView({1}, {5})));
}
TEST(BoxDifferenceTest, RankOneBeforeOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({3}, {4})),
::testing::UnorderedElementsAre(BoxView({1}, {2})));
}
TEST(BoxDifferenceTest, RankOneAfterOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({0}, {3})),
::testing::UnorderedElementsAre(BoxView({3}, {3})));
}
TEST(BoxDifferenceTest, RankOneBeforeAndAfter) {
EXPECT_THAT(
Subtract(BoxView({1}, {5}), BoxView({2}, {2})),
::testing::UnorderedElementsAre(BoxView({1}, {1}), BoxView({4}, {2})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 2}, {5, 7})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankTwoDim0FullDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({6, 2}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Full) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 10}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {1, 7}),
BoxView({5, 2}, {1, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Before) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 4}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 3})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1After) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, -1}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {7, 4})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 2}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 1}),
BoxView({2, 5}, {7, 1})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 4}, {3, 5}),
BoxView({4, 2}, {2, 2}),
BoxView({1, 2}, {3, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 4}, {4, 5}),
BoxView({1, 2}, {1, 2}),
BoxView({2, 2}, {4, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 4}, {3, 7})),
::testing::UnorderedElementsAre(
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5}),
BoxView({2, 2}, {3, 2}), BoxView({1, 2}, {1, 2}),
BoxView({5, 2}, {1, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 1}),
BoxView({4, 3}, {2, 6}),
BoxView({1, 3}, {3, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 1}),
BoxView({1, 3}, {1, 6}),
BoxView({2, 3}, {4, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 2}, {1, 1}), BoxView({5, 2}, {1, 1}),
BoxView({2, 3}, {3, 6}), BoxView({1, 3}, {1, 6}),
BoxView({5, 3}, {1, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 3}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 3}, {1, 1}), BoxView({5, 3}, {1, 1}),
BoxView({2, 2}, {3, 1}), BoxView({1, 2}, {1, 1}),
BoxView({5, 2}, {1, 1}), BoxView({2, 4}, {3, 5}),
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5})));
}
} |
625 | cpp | google/tensorstore | grid_partition | tensorstore/internal/grid_partition.cc | tensorstore/internal/grid_partition_test.cc | #ifndef TENSORSTORE_INTERNAL_GRID_PARTITION_H_
#define TENSORSTORE_INTERNAL_GRID_PARTITION_H_
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
absl::Status PartitionIndexTransformOverRegularGrid(
span<const DimensionIndex> grid_output_dimensions,
span<const Index> grid_cell_shape, IndexTransformView<> transform,
absl::FunctionRef<absl::Status(span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func);
absl::Status PartitionIndexTransformOverGrid(
span<const DimensionIndex> grid_output_dimensions,
absl::FunctionRef<Index(DimensionIndex grid_dim, Index output_index,
IndexInterval* cell_bounds)>
output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func);
absl::Status GetGridCellRanges(
span<const DimensionIndex> grid_output_dimensions, BoxView<> grid_bounds,
absl::FunctionRef<Index(DimensionIndex grid_dim, Index output_index,
IndexInterval* cell_bounds)>
output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback);
}
namespace internal_grid_partition {
class IndexTransformGridPartition;
absl::Status GetGridCellRanges(
const IndexTransformGridPartition& grid_partition,
span<const DimensionIndex> grid_output_dimensions, BoxView<> grid_bounds,
absl::FunctionRef<Index(DimensionIndex grid_dim, Index output_index,
IndexInterval* cell_bounds)>
output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback);
}
}
#endif
#include "tensorstore/internal/grid_partition.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_grid_partition {
namespace {
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::internal_index_space::TransformRep;
using IndexArraySet = IndexTransformGridPartition::IndexArraySet;
using StridedSet = IndexTransformGridPartition::StridedSet;
struct ConnectedSetIterateParameters {
const IndexTransformGridPartition& info;
span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func;
};
void InitializeConstantGridCellIndices(
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, span<Index> grid_cell_indices) {
for (DimensionIndex grid_dim = 0; grid_dim < grid_output_dimensions.size();
++grid_dim) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
const OutputIndexMapRef<> map = transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::constant) continue;
grid_cell_indices[grid_dim] =
output_to_grid_cell(grid_dim, map.offset(), nullptr);
}
}
class StridedSetGridCellIterator {
public:
explicit StridedSetGridCellIterator(
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, StridedSet strided_set)
: transform_(transform),
grid_output_dimensions_(grid_output_dimensions),
output_to_grid_cell_(output_to_grid_cell),
strided_set_(strided_set) {
const IndexInterval domain =
transform.input_domain()[strided_set.input_dimension];
input_index_ = domain.inclusive_min();
input_end_index_ = domain.exclusive_max();
}
bool AtEnd() const { return input_index_ == input_end_index_; }
IndexInterval Next(span<Index> grid_cell_indices) {
assert(!AtEnd());
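// Starting at the current input position, intersect the remaining input
// range with the preimage of the grid cell hit along every grid dimension of
// this connected set.  The result is the maximal input sub-interval that
// stays within a single grid cell; iteration resumes at its end.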
IndexInterval restricted_domain =
IndexInterval::UncheckedHalfOpen(input_index_, input_end_index_);
for (const DimensionIndex grid_dim :
strided_set_.grid_dimensions.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions_[grid_dim];
const OutputIndexMapRef<> map = transform_.output_index_map(output_dim);
IndexInterval cell_range;
grid_cell_indices[grid_dim] = output_to_grid_cell_(
grid_dim, input_index_ * map.stride() + map.offset(), &cell_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(cell_range, map.offset(), map.stride())
.value();
restricted_domain = Intersect(restricted_domain, cell_domain);
}
assert(!restricted_domain.empty());
input_index_ = restricted_domain.exclusive_max();
return restricted_domain;
}
private:
IndexTransformView<> transform_;
span<const DimensionIndex> grid_output_dimensions_;
OutputToGridCellFn output_to_grid_cell_;
StridedSet strided_set_;
Index input_index_;
Index input_end_index_;
};
class ConnectedSetIterateHelper {
public:
explicit ConnectedSetIterateHelper(ConnectedSetIterateParameters params)
: params_(std::move(params)),
grid_cell_indices_(params_.grid_output_dimensions.size()),
cell_transform_(internal_grid_partition::InitializeCellTransform(
params_.info, params_.transform)) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, grid_cell_indices_);
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
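// The iteration is a nested loop encoded recursively: outer levels walk the
// partitions of each index-array set, inner levels walk the grid cells of
// each strided set, and the innermost level invokes the callback with the
// accumulated grid cell indices and the corresponding cell transform.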
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.info.index_array_sets().size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
params_.info.index_array_sets()[set_i];
const auto grid_dimensions = index_array_set.grid_dimensions;
const DimensionIndex num_grid_dimensions = grid_dimensions.count();
for (Index partition_i = 0,
num_partitions = index_array_set.num_partitions();
partition_i < num_partitions; ++partition_i) {
const Index grid_cell_indices_offset = partition_i * num_grid_dimensions;
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
grid_cell_indices_[grid_dim] =
index_array_set
.grid_cell_indices[grid_cell_indices_offset + grid_i++];
}
UpdateCellTransformForIndexArraySetPartition(
index_array_set, set_i, partition_i, cell_transform_.get());
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.info.strided_sets().size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, params_.info.strided_sets()[set_i]);
const DimensionIndex cell_input_dim =
set_i + params_.info.index_array_sets().size();
while (!iterator.AtEnd()) {
auto restricted_domain = iterator.Next(grid_cell_indices_);
cell_transform_->input_origin()[cell_input_dim] =
restricted_domain.inclusive_min();
cell_transform_->input_shape()[cell_input_dim] = restricted_domain.size();
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
internal_index_space::DebugCheckInvariants(cell_transform_.get());
auto status = params_.func(
grid_cell_indices_,
TransformAccess::Make<IndexTransformView<>>(cell_transform_.get()));
cell_transform_ = MutableRep(std::move(cell_transform_));
return status;
}
ConnectedSetIterateParameters params_;
absl::FixedArray<Index, internal::kNumInlinedDims> grid_cell_indices_;
internal_index_space::TransformRep::Ptr<> cell_transform_;
};
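// Emits contiguous ranges of grid cells covered by a single-input-dimension
// output map.  For stride +/-1 the output range is contiguous, so a single
// closed cell range suffices; for other strides the cells are visited one
// input block at a time and adjacent cells are merged into maximal
// contiguous ranges before invoking the callback.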
bool GetStridedGridCellRanges(
IndexTransformView<> transform, OutputToGridCellFn output_to_grid_cell,
DimensionIndex grid_dim, DimensionIndex output_dim,
absl::FunctionRef<bool(IndexInterval grid_cell_range)> callback) {
const auto output_map = transform.output_index_maps()[output_dim];
assert(output_map.method() == OutputIndexMethod::single_input_dimension);
const Index output_offset = output_map.offset();
const Index output_stride = output_map.stride();
const DimensionIndex input_dim = output_map.input_dimension();
const IndexInterval input_domain = transform.domain().box()[input_dim];
if (output_map.stride() == 1 || output_map.stride() == -1) {
auto output_range = tensorstore::GetAffineTransformRange(
input_domain, output_offset, output_stride)
.value();
Index min_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_min(), nullptr);
Index max_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_max(), nullptr);
return callback(
IndexInterval::UncheckedClosed(min_cell_index, max_cell_index));
}
IndexInterval prev_interval;
for (Index input_index = input_domain.inclusive_min();
input_index < input_domain.exclusive_max();) {
IndexInterval output_range;
Index grid_cell = output_to_grid_cell(
grid_dim, input_index * output_stride + output_offset, &output_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(output_range, output_offset, output_stride)
.value();
assert(!cell_domain.empty());
if (grid_cell == prev_interval.exclusive_min() ||
grid_cell == prev_interval.exclusive_max()) {
prev_interval = IndexInterval::UncheckedClosed(
std::min(prev_interval.inclusive_min(), grid_cell),
std::max(prev_interval.inclusive_max(), grid_cell));
} else {
if (IsFinite(prev_interval)) {
if (!callback(prev_interval)) return false;
}
prev_interval = IndexInterval::UncheckedClosed(grid_cell, grid_cell);
}
input_index = cell_domain.exclusive_max();
}
return callback(prev_interval);
}
struct GetGridCellRangesIterateParameters {
const IndexTransformGridPartition& info;
span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(BoxView<> bounds)> func;
DimensionIndex outer_prefix_rank;
BoxView<> grid_bounds;
span<const IndexInterval> inner_intervals;
span<const StridedSet*> strided_sets_in_prefix;
span<const IndexArraySet*> index_array_sets_in_prefix;
};
class GetGridCellRangesIterateHelper {
public:
explicit GetGridCellRangesIterateHelper(
GetGridCellRangesIterateParameters params)
: params_(params) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell,
span<Index>(&grid_bounds_origin_[0], params_.transform.output_rank()));
for (DimensionIndex i = 0; i < params.outer_prefix_rank; ++i) {
grid_bounds_shape_[i] = 1;
}
for (DimensionIndex i = params.outer_prefix_rank + 1,
rank = params.grid_bounds.rank();
i < rank; ++i) {
grid_bounds_origin_[i] = params.grid_bounds.origin()[i];
grid_bounds_shape_[i] = params.grid_bounds.shape()[i];
}
if (params.inner_intervals.size() == 1) {
const auto& inner_interval = params.inner_intervals[0];
grid_bounds_origin_[params.outer_prefix_rank] =
inner_interval.inclusive_min();
grid_bounds_shape_[params.outer_prefix_rank] = inner_interval.size();
}
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
GetGridCellRangesIterateParameters params_;
Index grid_bounds_origin_[kMaxRank];
Index grid_bounds_shape_[kMaxRank];
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.index_array_sets_in_prefix.size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
*params_.index_array_sets_in_prefix[set_i];
const auto grid_dimensions = index_array_set.grid_dimensions;
const DimensionIndex num_grid_dimensions = grid_dimensions.count();
for (Index partition_i = 0,
num_partitions = index_array_set.num_partitions();
partition_i < num_partitions; ++partition_i) {
const Index grid_cell_indices_offset = partition_i * num_grid_dimensions;
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
grid_bounds_origin_[grid_dim] =
index_array_set
.grid_cell_indices[grid_cell_indices_offset + grid_i++];
}
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.strided_sets_in_prefix.size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, *params_.strided_sets_in_prefix[set_i]);
while (!iterator.AtEnd()) {
iterator.Next(grid_bounds_origin_);
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
MutableBoxView<> bounds(params_.grid_bounds.rank(), grid_bounds_origin_,
grid_bounds_shape_);
if (params_.inner_intervals.size() == 1) {
return params_.func(bounds);
}
DimensionIndex outer_prefix_rank = params_.outer_prefix_rank;
for (const auto& inner_interval : params_.inner_intervals) {
bounds[outer_prefix_rank] = inner_interval;
TENSORSTORE_RETURN_IF_ERROR(params_.func(bounds));
}
return absl::OkStatus();
}
};
}
}
namespace internal {
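// Partitions the input domain of `transform` according to the grid cells its
// outputs map to: the transform is first pre-partitioned into connected sets
// of input dimensions, and then every combination of grid cells reachable
// through those sets is visited with a `cell_transform` restricted to the
// corresponding portion of the input domain.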
absl::Status PartitionIndexTransformOverGrid(
span<const DimensionIndex> grid_output_dimensions,
internal_grid_partition::OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func) {
internal_grid_partition::IndexTransformGridPartition partition_info;
auto status = internal_grid_partition::PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, partition_info);
if (!status.ok()) return status;
return internal_grid_partition::ConnectedSetIterateHelper(
{partition_info,
grid_output_dimensions,
output_to_grid_cell,
transform,
std::move(func)})
.Iterate();
}
absl::Status PartitionIndexTransformOverRegularGrid(
span<const DimensionIndex> grid_output_dimensions,
span<const Index> grid_cell_shape, IndexTransformView<> transform,
absl::FunctionRef<absl::Status(span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func) {
assert(grid_cell_shape.size() == grid_output_dimensions.size());
internal_grid_partition::RegularGridRef grid{grid_cell_shape};
return PartitionIndexTransformOverGrid(grid_output_dimensions, grid,
transform, std::move(func));
}
}
namespace internal_grid_partition {
absl::Status GetGridCellRanges(
const IndexTransformGridPartition& grid_partition,
span<const DimensionIndex> grid_output_dimensions, BoxView<> grid_bounds,
OutputToGridCellFn output_to_grid_cell, IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback) {
assert(grid_output_dimensions.size() == grid_bounds.rank());
if (transform.domain().box().is_empty()) {
r | #include "tensorstore/internal/grid_partition.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/irregular_grid.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::internal::GetGridCellRanges;
using ::tensorstore::internal::IrregularGrid;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::OutputToGridCellFn;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
namespace partition_tests {
using R = std::pair<std::vector<Index>, IndexTransform<>>;
std::vector<R> GetPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const std::vector<Index>& grid_cell_shape, IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition info;
RegularGridRef grid{grid_cell_shape};
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, info));
TENSORSTORE_CHECK_OK(
tensorstore::internal::PartitionIndexTransformOverRegularGrid(
grid_output_dimensions, grid_cell_shape, transform,
[&](span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
auto cell_transform_direct = info.GetCellTransform(
transform, grid_cell_indices, grid_output_dimensions,
[&](DimensionIndex dim, Index cell_index) {
return grid.GetCellOutputInterval(dim, cell_index);
});
EXPECT_EQ(cell_transform_direct, cell_transform);
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantOneDimensional) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_constant(0, 3)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_single_input_dimension(0, 0)
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantTwoDimensional) {
const auto results = GetPartitions({0, 1}, {2, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_constant(0, 3)
.output_constant(1, 7)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, OneDimensionalUnitStride) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({5})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-2})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoDimensionalIdentity) {
const auto results = GetPartitions({0, 1}, {20, 10},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 20})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 0})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 10})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 20})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleStridedDimension) {
const auto results =
GetPartitions({0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, DiagonalStridedDimensions) {
const auto results =
GetPartitions({0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.output_single_input_dimension(1, 7, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimension) {
const auto results =
GetPartitions({0}, {3},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({8})
.output_index_array(
0, 0, 1, MakeArray<Index>({1, 2, 3, 4, 5, 6, 7, 8}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 101}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({102, 103, 104}))
.Finalize()
.value()},
R{{2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({105, 106, 107}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimensionStrided) {
const auto results = GetPartitions(
{0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({101, 102}))
.Finalize()
.value()},
R{{3},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoIndexArrayDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.output_index_array(1, 4, -2, MakeArray<Index>({5, 1, 7, -3, -2, 5}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1, -2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({102}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({101}))
.Finalize()
.value()},
R{{3, -1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, IndexArrayAndStridedDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}));
}
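// Like GetPartitions above, but partitions `transform` over an IrregularGrid
// and records the (grid_cell_indices, cell_transform) pairs without the
// cross-check.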
std::vector<R> GetIrregularPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const IrregularGrid& grid, IndexTransformView<> transform) {
std::vector<R> results;
TENSORSTORE_CHECK_OK(tensorstore::internal::PartitionIndexTransformOverGrid(
grid_output_dimensions, grid, transform,
[&](span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverIrregularGrid, TwoDimensionalIdentity) {
const std::vector<DimensionIndex> grid_output_dimensions{0, 1};
std::vector<Index> dimension0{15};
std::vector<Index> dimension1{-10, 10, 100};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results =
GetIrregularPartitions(grid_output_dimensions, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()}
));
}
TEST(PartitionIndexTransformOverIrregularGrid, IndexArrayAndStridedDimensions) {
std::vector<Index> dimension0{10, 15, 20, 30, 50};
std::vector<Index> dimension1{0, 1, 5, 10, 13};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results = GetIrregularPartitions(
{0, 1}, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(R{{0, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{1, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{3, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}
));
}
}
namespace get_grid_cell_ranges_tests {
using R = Box<>;
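// Pre-partitions `transform` over the grid and returns the grid cell ranges
// (as boxes over grid indices) produced by GetGridCellRanges.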
Result<std::vector<R>> GetRanges(
span<const DimensionIndex> grid_output_dimensions, BoxView<> grid_bounds,
OutputToGridCellFn output_to_grid_cell, IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, grid_partition));
TENSORSTORE_RETURN_IF_ERROR(GetGridCellRanges(
grid_output_dimensions, grid_bounds, output_to_grid_cell, transform,
[&](BoxView<> bounds) -> absl::Status {
results.emplace_back(bounds);
return absl::OkStatus();
}));
return results;
}
TEST(GetGridCellRangesTest, Rank0) {
EXPECT_THAT(GetRanges({}, {},
RegularGridRef{{}},
IndexTransformBuilder(0, 0).Finalize().value()),
::testing::Optional(ElementsAre(R{})));
}
TEST(GetGridCellRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R{{0}, {10}})));
}
TEST(GetGridCellRangesTest, Rank1Constrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R({1}, {7}))));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 10}}},
IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 4}},
R{{2, 0}, {1, 4}}
)));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedFirstDimOnly) {
/ |
626 | cpp | google/tensorstore | dimension_labels | tensorstore/internal/dimension_labels.cc | tensorstore/internal/dimension_labels_test.cc | #ifndef TENSORSTORE_INTERNAL_DIMENSION_LABELS_H_
#define TENSORSTORE_INTERNAL_DIMENSION_LABELS_H_
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
absl::Status ValidateDimensionLabelsAreUnique(span<const std::string> labels);
absl::Status ValidateDimensionLabelsAreUnique(
span<const std::string_view> labels);
}
}
#endif
#include "tensorstore/internal/dimension_labels.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <string_view>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
namespace {
absl::Status ValidateDimensionLabelsAreUniqueImpl(
span<std::string_view> sorted_labels) {
std::sort(sorted_labels.begin(), sorted_labels.end());
size_t i;
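  // Empty labels sort first and are allowed to repeat; skip past them before
  // checking for duplicates.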
for (i = 1; i < sorted_labels.size() && sorted_labels[i].empty(); ++i)
continue;
std::string error;
for (; i < sorted_labels.size(); ++i) {
std::string_view label = sorted_labels[i];
if (label == sorted_labels[i - 1]) {
tensorstore::StrAppend(&error, error.empty() ? "" : ", ",
QuoteString(label));
}
}
if (!error.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Dimension label(s) ", error, " not unique"));
}
return absl::OkStatus();
}
}
absl::Status ValidateDimensionLabelsAreUnique(span<const std::string> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
absl::Status ValidateDimensionLabelsAreUnique(
span<const std::string_view> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
}
} | #include "tensorstore/internal/dimension_labels.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ValidateDimensionLabelsAreUnique;
TEST(ValidateDimensionLabelsAreUniqueTest, Basic) {
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c"}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{"", "", ""}));
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "", "d", ""}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{}));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "a"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"a\" not unique"));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "b"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"b\" not unique"));
}
} |
627 | cpp | google/tensorstore | data_type_endian_conversion | tensorstore/internal/data_type_endian_conversion.cc | tensorstore/internal/data_type_endian_conversion_test.cc | #ifndef TENSORSTORE_INTERNAL_DATA_TYPE_ENDIAN_CONVERSION_H_
#define TENSORSTORE_INTERNAL_DATA_TYPE_ENDIAN_CONVERSION_H_
#include "absl/strings/cord.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
void EncodeArray(ArrayView<const void> source, ArrayView<void> target,
endian target_endian);
void DecodeArray(ArrayView<const void> source, endian source_endian,
ArrayView<void> target);
void DecodeArray(SharedArrayView<void>* source, endian source_endian,
StridedLayoutView<> decoded_layout);
SharedArrayView<void> CopyAndDecodeArray(ArrayView<const void> source,
endian source_endian,
StridedLayoutView<> decoded_layout);
SharedArrayView<const void> TryViewCordAsArray(const absl::Cord& source,
Index offset, DataType dtype,
endian source_endian,
StridedLayoutView<> layout);
}
}
#endif
#include "tensorstore/internal/data_type_endian_conversion.h"
#include <cassert>
#include <complex>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
void EncodeArray(ArrayView<const void> source, ArrayView<void> target,
endian target_endian) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
internal::IterateOverStridedLayouts<2>(
{(target_endian == endian::native) ? functions.copy
: functions.swap_endian,
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{dtype.size(), dtype.size()}});
}
namespace {
static_assert(sizeof(bool) == 1);
struct DecodeBoolArray {
void operator()(unsigned char* source, bool* output, void*) const {
*output = static_cast<bool>(*source);
}
};
struct DecodeBoolArrayInplace {
void operator()(unsigned char* source, void*) const {
*source = static_cast<bool>(*source);
}
};
}
void DecodeArray(ArrayView<const void> source, endian source_endian,
ArrayView<void> target) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
if (dtype.id() != DataTypeId::bool_t) {
EncodeArray(source, target, source_endian);
return;
}
internal::IterateOverStridedLayouts<2>(
{SimpleElementwiseFunction<
DecodeBoolArray(unsigned char, bool), void*>(),
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{1, 1}});
}
void DecodeArray(SharedArrayView<void>* source, endian source_endian,
StridedLayoutView<> decoded_layout) {
assert(source != nullptr);
assert(absl::c_equal(source->shape(), decoded_layout.shape()));
const DataType dtype = source->dtype();
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
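  // If the existing buffer satisfies the data type's alignment requirements,
  // decode in place (byte-swapping or normalizing bools as needed); otherwise
  // fall back to copying into a newly allocated array with `decoded_layout`.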
if ((reinterpret_cast<std::uintptr_t>(source->data()) % dtype->alignment) ==
0 &&
std::all_of(source->byte_strides().begin(), source->byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
const ElementwiseFunction<1, void*>* convert_func = nullptr;
if (dtype.id() == DataTypeId::bool_t) {
convert_func =
SimpleElementwiseFunction<DecodeBoolArrayInplace(unsigned char),
void*>();
} else if (source_endian != endian::native &&
functions.swap_endian_inplace) {
convert_func = functions.swap_endian_inplace;
}
if (convert_func) {
internal::IterateOverStridedLayouts<1>(
{convert_func,
nullptr},
nullptr, source->shape(), {{source->data()}},
{{source->byte_strides().data()}},
skip_repeated_elements, {{dtype.size()}});
}
} else {
*source = CopyAndDecodeArray(*source, source_endian, decoded_layout);
}
}
SharedArrayView<void> CopyAndDecodeArray(ArrayView<const void> source,
endian source_endian,
StridedLayoutView<> decoded_layout) {
SharedArrayView<void> target(
internal::AllocateAndConstructSharedElements(
decoded_layout.num_elements(), default_init, source.dtype()),
decoded_layout);
DecodeArray(source, source_endian, target);
return target;
}
SharedArrayView<const void> TryViewCordAsArray(const absl::Cord& source,
Index offset, DataType dtype,
endian source_endian,
StridedLayoutView<> layout) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
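  // A zero-copy view is only possible when no byte swapping is required, the
  // cord is flat, and the data satisfies the data type's alignment
  // requirements.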
if (source_endian != endian::native && functions.swap_endian_inplace) {
return {};
}
auto maybe_flat = source.TryFlat();
if (!maybe_flat) {
return {};
}
ByteStridedPointer<const void> ptr = maybe_flat->data();
ptr += offset;
if ((reinterpret_cast<std::uintptr_t>(ptr.get()) % dtype->alignment) != 0 ||
!std::all_of(layout.byte_strides().begin(), layout.byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
return {};
}
auto shared_cord = std::make_shared<absl::Cord>(source);
if (auto shared_flat = shared_cord->TryFlat();
!shared_flat || shared_flat->data() != maybe_flat->data()) {
return {};
}
return SharedArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(shared_cord), ptr.get()),
dtype),
layout);
}
}
} | #include "tensorstore/internal/data_type_endian_conversion.h"
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::c_order;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::fortran_order;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::DecodeArray;
using ::tensorstore::internal::EncodeArray;
using ::tensorstore::internal::TryViewCordAsArray;
TEST(EncodeDecodeArrayTest, Uint8) {
uint8_t source[6] = {1, 2, 3, 4, 5, 6};
uint8_t dest1[6];
uint8_t dest2[6];
uint8_t dest3[6];
uint8_t dest4[6];
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest1, {2, 3}, fortran_order), endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest2, {2, 3}, fortran_order), endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::little,
Array(dest3, {2, 3}, fortran_order));
EXPECT_THAT(dest3, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::big,
Array(dest4, {2, 3}, fortran_order));
EXPECT_THAT(dest4, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
}
TEST(EncodeDecodeArrayTest, Uint16) {
uint16_t source[6] = {0x1234, 0x5678, 0x9012, 0x3456, 0x7890, 0x3344};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x34, 0x12, 0x56, 0x34,
0x78, 0x56, 0x90, 0x78,
0x12, 0x90, 0x44, 0x33}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({0x0,
0x12, 0x34, 0x34, 0x56,
0x56, 0x78, 0x78, 0x90,
0x90, 0x12, 0x33, 0x44}));
}
TEST(EncodeDecodeArrayTest, Float16) {
using ::tensorstore::dtypes::float16_t;
float16_t source[6] = {float16_t(1.0), float16_t(2.0), float16_t(3.0),
float16_t(4.0), float16_t(5.0), float16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x00, 0x3c,
0x00, 0x44,
0x00, 0x40,
0x00, 0x45,
0x00, 0x42,
0x00, 0x46}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3c, 0x00,
0x44, 0x00,
0x40, 0x00,
0x45, 0x00,
0x42, 0x00,
0x46, 0x00,
}));
}
TEST(EncodeDecodeArrayTest, Bfloat16) {
using ::tensorstore::dtypes::bfloat16_t;
bfloat16_t source[6] = {bfloat16_t(1.0), bfloat16_t(2.0), bfloat16_t(3.0),
bfloat16_t(4.0), bfloat16_t(5.0), bfloat16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({
0x0,
0x80, 0x3f,
0x80, 0x40,
0x00, 0x40,
0xa0, 0x40,
0x40, 0x40,
0xc0, 0x40,
}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3f, 0x80,
0x40, 0x80,
0x40, 0x00,
0x40, 0xa0,
0x40, 0x40,
0x40, 0xc0,
}));
}
TEST(DecodeArrayTest, Bool) {
unsigned char source[6] = {0x12, 0x00, 0x34, 0x1, 0x78, 0x00};
unsigned char dest[6];
DecodeArray(Array(reinterpret_cast<bool*>(source), {2, 3}, c_order),
endian::little,
Array(reinterpret_cast<bool*>(dest), {2, 3}, fortran_order));
EXPECT_THAT(dest, ::testing::ElementsAre(1, 1, 0, 1, 1, 0));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
TEST(DecodeArrayTest, Uint16InPlaceBigEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::big, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x1234, 0x5678, 0x9012},
{0x3456, 0x7890, 0x3344}}));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndianUnaligned) {
alignas(2) unsigned char source[13] = {0x00,
0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source + 1), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_NE(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), alt_layout);
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
void TestConvertCordInplace(DataType dtype, endian endian_value,
ContiguousLayoutOrder order,
bool expected_inplace) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype, ", order=", order,
", endian=", endian_value));
auto orig_array = tensorstore::AllocateArray(
{4, 5, 6}, order, tensorstore::default_init, dtype);
EXPECT_EQ(1, orig_array.pointer().use_count());
auto cord = absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(orig_array.data()),
dtype.size() * orig_array.num_elements()),
[owner = orig_array.pointer()](std::string_view s) {});
auto cord_array = TryViewCordAsArray(cord, 0, dtype, endian_value,
orig_array.layout());
if (expected_inplace) {
EXPECT_EQ(orig_array.data(), cord_array.data());
EXPECT_EQ(2, orig_array.pointer().use_count());
cord.Clear();
EXPECT_EQ(2, orig_array.pointer().use_count());
} else {
EXPECT_FALSE(cord_array.valid());
}
}
TEST(TryViewCordAsArrayTest, Inplace) {
const DataType data_types[] = {dtype_v<uint8_t>, dtype_v<uint16_t>,
dtype_v<uint32_t>, dtype_v<uint64_t>};
for (auto dtype : data_types) {
for (auto order : {tensorstore::c_order, tensorstore::fortran_order}) {
TestConvertCordInplace(dtype, endian::native, order,
true);
}
}
constexpr endian non_native =
endian::native == endian::little ? endian::big : endian::little;
TestConvertCordInplace(dtype_v<uint8_t>, non_native, tensorstore::c_order,
true);
TestConvertCordInplace(dtype_v<bool>, non_native, tensorstore::c_order,
true);
TestConvertCordInplace(dtype_v<uint32_t>, non_native, tensorstore::c_order,
false);
}
TEST(TryViewCordAsArrayTest, FlatCordBuilder) {
constexpr size_t kExtraBytes = 8;
tensorstore::internal::FlatCordBuilder builder(sizeof(uint32_t) * 3 * 4 * 5 +
kExtraBytes);
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
char* data_ptr = builder.data();
auto cord = std::move(builder).Build();
for (size_t offset = 0; offset < kExtraBytes; ++offset) {
auto array = TryViewCordAsArray(cord, offset, dtype_v<uint32_t>,
endian::native, layout);
if ((offset % alignof(uint32_t)) == 0) {
EXPECT_EQ(static_cast<void*>(data_ptr + offset), array.data());
EXPECT_EQ(layout, array.layout());
} else {
EXPECT_FALSE(array.valid());
}
}
}
TEST(TryViewCordAsArrayTest, Fragmented) {
std::vector<std::string> parts{
std::string(sizeof(uint32_t) * 3 * 3 * 5, '\0'),
std::string(sizeof(uint32_t) * 3 * 1 * 5, '\0')};
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
absl::Cord cord = absl::MakeFragmentedCord(parts);
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint32_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
TEST(TryViewCordAsArrayTest, SmallBuffer) {
StridedLayout<> layout(tensorstore::c_order, sizeof(uint8_t), {4});
absl::Cord cord("abcd");
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint8_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
} |
628 | cpp | google/tensorstore | nditerable_util | tensorstore/internal/nditerable_util.cc | tensorstore/internal/nditerable_util_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_UTIL_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_UTIL_H_
#include <algorithm>
#include <cassert>
#include <type_traits>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
struct NDIterationFullLayoutInfo;
struct NDIterationSimplifiedLayoutInfo;
template <bool Full = false>
using NDIterationLayoutInfo =
std::conditional_t<Full, NDIterationFullLayoutInfo,
NDIterationSimplifiedLayoutInfo>;
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints,
NDIterationSimplifiedLayoutInfo* info);
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints,
NDIterationFullLayoutInfo* info);
struct NDIterationSimplifiedLayoutInfo {
NDIterationSimplifiedLayoutInfo() = default;
NDIterationSimplifiedLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints) {
GetNDIterationLayoutInfo(iterable, shape, constraints, this);
}
NDIterable::IterationLayoutView layout_view() const {
return {shape, directions,
iteration_dimensions,
iteration_shape};
}
bool empty;
absl::InlinedVector<Index, kNumInlinedDims> shape;
absl::InlinedVector<int, kNumInlinedDims> directions;
absl::InlinedVector<DimensionIndex, kNumInlinedDims> iteration_dimensions;
absl::InlinedVector<Index, kNumInlinedDims> iteration_shape;
};
struct NDIterationFullLayoutInfo : public NDIterationSimplifiedLayoutInfo {
NDIterationFullLayoutInfo() = default;
NDIterationFullLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints) {
GetNDIterationLayoutInfo(iterable, shape, constraints, this);
}
absl::InlinedVector<DimensionIndex, kNumInlinedDims>
full_iteration_dimensions;
};
struct NDIterationBufferInfo {
IterationBufferKind buffer_kind;
IterationBufferShape block_shape;
};
IterationBufferShape GetNDIterationBlockShape(
std::ptrdiff_t working_memory_bytes_per_element,
span<const Index> iteration_shape);
IterationBufferShape GetNDIterationBlockShape(
const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout, IterationBufferKind buffer_kind);
void GetNDIterationBufferInfo(const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout,
NDIterationBufferInfo* buffer_info);
template <bool Full = false>
struct NDIterationInfo : public NDIterationLayoutInfo<Full>,
public NDIterationBufferInfo {
NDIterationInfo() = default;
explicit NDIterationInfo(const NDIterableBufferConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints) {
GetNDIterationLayoutInfo(iterable, shape, constraints, this);
GetNDIterationBufferInfo(iterable, this->layout_view(), this);
}
NDIterable::IterationBufferKindLayoutView buffer_layout_view() const {
return {{this->layout_view(), this->block_shape}, this->buffer_kind};
}
};
template <typename Iterables, typename Base = NDIterableLayoutConstraint>
struct CompositeNDIterableLayoutConstraint : public Base {
CompositeNDIterableLayoutConstraint(Iterables iterables)
: iterables(std::move(iterables)) {}
Iterables iterables;
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
int max_magnitude_order = 0;
for (const auto& iterable : iterables) {
int order = iterable->GetDimensionOrder(dim_i, dim_j);
if (std::abs(order) > std::abs(max_magnitude_order)) {
max_magnitude_order = order;
}
}
return max_magnitude_order;
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
for (const auto& iterable : iterables) {
iterable->UpdateDirectionPrefs(prefs);
}
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
for (const auto& iterable : iterables) {
if (!iterable->CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j)) {
return false;
}
}
return true;
}
};
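// Advances `position` by `step` elements along the last dimension, carrying
// into outer dimensions when a row is exhausted.  Returns the size of the
// next block to process, or 0 when iteration is complete.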
inline Index StepBufferPositionForward(span<const Index> shape, Index step,
Index max_buffer_size, Index* position) {
const DimensionIndex rank = shape.size();
assert(rank > 0);
assert(step >= 0);
assert(max_buffer_size >= 0 && max_buffer_size <= shape[rank - 1]);
const Index remainder = shape[rank - 1] - position[rank - 1];
assert(remainder >= step);
position[rank - 1] += step;
if (remainder != step) {
return std::min(max_buffer_size, remainder - step);
}
for (DimensionIndex i = rank - 1; i > 0;) {
position[i] = 0;
--i;
if (++position[i] < shape[i]) {
return max_buffer_size;
}
}
return 0;
}
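// Steps `position` backward by up to `max_buffer_size` elements along the
// last dimension, borrowing from outer dimensions when the last coordinate is
// already 0.  Returns the resulting block size, or 0 once the start is
// reached.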
inline Index StepBufferPositionBackward(span<const Index> shape,
Index max_buffer_size,
Index* position) {
const DimensionIndex rank = shape.size();
assert(rank > 0);
assert(max_buffer_size > 0);
assert(max_buffer_size <= shape[rank - 1]);
const Index remainder = position[rank - 1];
if (remainder != 0) {
const Index buffer_size = std::min(max_buffer_size, remainder);
position[rank - 1] -= buffer_size;
return buffer_size;
}
DimensionIndex i = rank - 2;
while (true) {
if (i < 0) return 0;
if (position[i] != 0) {
position[i]--;
break;
}
--i;
}
++i;
while (i < rank - 1) {
position[i] = shape[i] - 1;
++i;
}
position[rank - 1] = shape[rank - 1] - max_buffer_size;
return max_buffer_size;
}
inline void ResetBufferPositionAtBeginning(span<Index> position) {
std::fill_n(position.begin(), position.size(), Index(0));
}
inline void ResetBufferPositionAtEnd(span<const Index> shape, Index step,
Index* position) {
const DimensionIndex rank = shape.size();
assert(rank > 0);
assert(step >= 0);
assert(step <= shape[rank - 1]);
for (DimensionIndex i = 0; i < rank - 1; ++i) {
position[i] = shape[i] - 1;
}
position[rank - 1] = shape[rank - 1] - step;
}
inline void FillOffsetsArrayFromStride(Index outer_byte_stride,
Index inner_byte_stride,
Index outer_size, Index inner_size,
Index* offsets_array) {
for (Index outer_i = 0; outer_i < outer_size; ++outer_i) {
for (Index inner_i = 0; inner_i < inner_size; ++inner_i) {
*(offsets_array++) = wrap_on_overflow::Add(
wrap_on_overflow::Multiply(outer_byte_stride, outer_i),
wrap_on_overflow::Multiply(inner_byte_stride, inner_i));
}
}
}
class NDIterationPositionStepper {
public:
NDIterationPositionStepper(span<const Index> shape, Index block_size)
: position_(shape.size()),
shape_(shape.data()),
block_size_(block_size) {}
Index ResetAtBeginning() {
ResetBufferPositionAtBeginning(position_);
return block_size_;
}
Index ResetAtEnd() {
ResetBufferPositionAtEnd(shape(), block_size_, position_.data());
return block_size_;
}
Index StepForward(Index step) {
return StepBufferPositionForward(shape(), step, block_size_,
position_.data());
}
Index StepBackward() {
return StepBufferPositionBackward(shape(), block_size_, position_.data());
}
span<Index> position() { return position_; }
span<const Index> position() const { return position_; }
span<const Index> shape() const { return span(shape_, position_.size()); }
Index block_size() const { return block_size_; }
Index& block_size() { return block_size_; }
private:
absl::FixedArray<Index, kNumInlinedDims> position_;
const Index* shape_;
Index block_size_;
};
class DefaultNDIterableArena {
public:
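  // The comma expression writes `buffer_[0]` before handing the stack buffer
  // to the arena, presumably so the buffer is treated as initialized.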
DefaultNDIterableArena()
: arena_( (buffer_[0] = 0, buffer_)) {}
operator Arena*() { return &arena_; }
template <typename T>
operator ArenaAllocator<T>() {
return &arena_;
}
private:
unsigned char buffer_[32 * 1024];
tensorstore::internal::Arena arena_;
};
#ifndef NDEBUG
void SetNDIterableTestUnitBlockSize(bool value);
#endif
Index UpdatePartialBlock(NDIterator& iterator, span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferKind buffer_kind,
IterationBufferPointer buffer, Index modified_count,
absl::Status* status);
inline Index UpdateBlock(NDIterator& iterator, span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferKind buffer_kind,
IterationBufferPointer buffer, Index modified_count,
absl::Status* status) {
if (ABSL_PREDICT_FALSE(modified_count != block_shape[0] * block_shape[1])) {
return UpdatePartialBlock(iterator, indices, block_shape, buffer_kind,
buffer, modified_count, status);
}
return iterator.UpdateBlock(indices, block_shape, buffer, status);
}
}
}
#endif
#include "tensorstore/internal/nditerable_util.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
#ifndef NDEBUG
bool nditerable_use_unit_block_size = false;
#endif
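// Computes the iteration layout: drops skippable and size-1 dimensions,
// orders the remaining dimensions according to `constraints` (or the
// iterable's preferred order), and merges adjacent dimensions that can be
// iterated as a single combined dimension.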
template <bool Full>
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints,
NDIterationLayoutInfo<Full>* info) {
info->shape.assign(shape.begin(), shape.end());
info->directions.resize(shape.size());
info->iteration_dimensions.clear();
info->iteration_shape.clear();
if constexpr (Full) {
info->full_iteration_dimensions.clear();
}
info->empty = false;
using DirectionPref = NDIterableLayoutConstraint::DirectionPref;
DirectionPref direction_prefs[kMaxRank];
std::fill_n(
direction_prefs, shape.size(),
constraints.repeated_elements_constraint() == skip_repeated_elements
? DirectionPref::kCanSkip
: DirectionPref::kEither);
iterable.UpdateDirectionPrefs(direction_prefs);
for (DimensionIndex dim_i = 0; dim_i < shape.size(); ++dim_i) {
const Index size = shape[dim_i];
if (size == 0) {
info->empty = true;
} else if ((size == 1 &&
direction_prefs[dim_i] != DirectionPref::kForwardRequired) ||
direction_prefs[dim_i] == DirectionPref::kCanSkip) {
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
continue;
}
info->iteration_dimensions.push_back(dim_i);
}
if (info->iteration_dimensions.empty()) {
info->iteration_dimensions.push_back(-1);
info->iteration_dimensions.push_back(-1);
info->iteration_shape.push_back(1);
info->iteration_shape.push_back(1);
} else {
if (constraints.order_constraint() == ContiguousLayoutOrder::fortran) {
std::reverse(info->iteration_dimensions.begin(),
info->iteration_dimensions.end());
} else if (constraints.order_constraint() == unspecified_order) {
std::sort(info->iteration_dimensions.begin(),
info->iteration_dimensions.end(),
[&](DimensionIndex dim_i, DimensionIndex dim_j) {
return iterable.GetDimensionOrder(dim_i, dim_j) < 0;
});
}
DimensionIndex dim_i = info->iteration_dimensions[0];
Index size_i = shape[dim_i];
info->iteration_shape.push_back(size_i);
int dir_i =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_i]);
info->directions[dim_i] = dir_i;
auto next_iteration_dim_it = info->iteration_dimensions.begin();
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
for (DimensionIndex i = 1;
i < static_cast<DimensionIndex>(info->iteration_dimensions.size());
++i) {
DimensionIndex dim_j = info->iteration_dimensions[i];
Index size_j = shape[dim_j];
int dir_j =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_j]);
info->directions[dim_j] = dir_j;
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_j);
}
Index size_combined;
if (iterable.CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j) &&
!MulOverflow(size_i, size_j, &size_combined)) {
size_j = size_combined;
info->iteration_shape.back() = size_combined;
} else {
info->iteration_shape.push_back(size_j);
++next_iteration_dim_it;
}
*next_iteration_dim_it = dim_j;
dim_i = dim_j;
size_i = size_j;
dir_i = dir_j;
}
info->iteration_dimensions.erase(next_iteration_dim_it + 1,
info->iteration_dimensions.end());
}
if (info->iteration_dimensions.size() < 2) {
assert(info->iteration_dimensions.size() == 1);
info->iteration_dimensions.insert(info->iteration_dimensions.begin(), -1);
info->iteration_shape.insert(info->iteration_shape.begin(), 1);
}
}
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints,
NDIterationSimplifiedLayoutInfo* info) {
GetNDIterationLayoutInfo<false>(iterable, shape, constraints, info);
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
span<const Index> shape,
IterationConstraints constraints,
NDIterationFullLayoutInfo* info) {
GetNDIterationLayoutInfo<true>(iterable, shape, constraints, info);
}
IterationBufferShape GetNDIterationBlockShape(
ptrdiff_t working_memory_bytes_per_element,
span<const Index> iteration_shape) {
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
return {1, 1};
#else
#if !defined(NDEBUG)
if (nditerable_use_unit_block_size) {
return {1, 1};
}
#endif
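  // Aim for roughly 24KiB of working memory per block, spread over the last
  // two iteration dimensions.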
constexpr Index kTargetMemoryUsage = 24 * 1024;
const Index penultimate_dimension_size =
iteration_shape[iteration_shape.size() - 2];
const Index last_dimension_size = iteration_shape[iteration_shape.size() - 1];
if (working_memory_bytes_per_element == 0) {
return {penultimate_dimension_size, last_dimension_size};
} else {
const Index target_size = std::max(
Index(8), kTargetMemoryUsage / Index(working_memory_bytes_per_element));
const Index block_inner_size =
std::max(Index(1), std::min(last_dimension_size, target_size));
Index block_outer_size = 1;
if (block_inner_size < target_size) {
block_outer_size =
std::min(penultimate_dimension_size, target_size / block_inner_size);
}
return {block_outer_size, block_inner_size};
}
#endif
}
IterationBufferShape GetNDIterationBlockShape(
const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout, IterationBufferKind buffer_kind) {
return GetNDIterationBlockShape(
iterable.GetWorkingMemoryBytesPerElement(layout, buffer_kind),
layout.iteration_shape);
}
void GetNDIterationBufferInfo(const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout,
NDIterationBufferInfo* buffer_info) {
buffer_info->buffer_kind =
iterable.GetIterationBufferConstraint(layout).min_buffer_kind;
buffer_info->block_shape =
GetNDIterationBlockShape(iterable, layout, buffer_info->buffer_kind);
}
#ifndef NDEBUG
void SetNDIterableTestUnitBlockSize(bool value) {
nditerable_use_unit_block_size = value;
}
#endif
Index UpdatePartialBlock(NDIterator& iterator, span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferKind buffer_kind,
IterationBufferPointer buffer, Index modified_count,
absl::Status* status) {
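  // Split the partial block into the leading full rows, updated in a single
  // call, followed by the remaining partial final row.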
Index full_rows = modified_count / block_shape[1];
Index final_row_count = modified_count % block_shape[1];
Index updated = 0;
if (full_rows != 0) {
updated = iterator.UpdateBlock(indices, {full_rows, block_shape[1]}, buffer,
status);
if (ABSL_PREDICT_FALSE(updated != full_rows * block_shape[1])) {
return updated;
}
}
if (final_row_count != 0) {
buffer.AddElementOffset(buffer_kind, full_rows, 0);
Index final_row_indices[kMaxRank];
std::copy(indices.begin(), indices.end(), final_row_indices);
final_row_indices[indices.size() - 2] += full_rows;
updated += iterator.UpdateBlock(
span<const Index>(final_row_indices, indices.size()),
{1, final_row_count}, buffer, status);
}
return updated;
}
}
} | #include "tensorstore/internal/nditerable_util.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::span;
using ::tensorstore::internal::GetNDIterationBlockShape;
using ::tensorstore::internal::NDIterationPositionStepper;
using ::tensorstore::internal::ResetBufferPositionAtBeginning;
using ::tensorstore::internal::ResetBufferPositionAtEnd;
using ::tensorstore::internal::StepBufferPositionBackward;
using ::tensorstore::internal::StepBufferPositionForward;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TEST(GetNDIterationBlockShape, Basic) {
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr auto expected_block_size = [](Index block_size) {
return block_size;
};
#else
constexpr auto expected_block_size = [](Index block_size) { return 1; };
#endif
EXPECT_THAT(
GetNDIterationBlockShape(0,
span<const Index>({3, 4, 1000000})),
ElementsAre(expected_block_size(4), expected_block_size(1000000)));
EXPECT_THAT(GetNDIterationBlockShape(1,
span<const Index>({3, 4, 15})),
ElementsAre(expected_block_size(4), expected_block_size(15)));
EXPECT_THAT(GetNDIterationBlockShape(1,
span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(24 * 1024)));
EXPECT_THAT(GetNDIterationBlockShape(32,
span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(768)));
EXPECT_THAT(GetNDIterationBlockShape(64,
span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(384)));
}
TEST(ResetBufferPositionTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{42};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0));
ResetBufferPositionAtEnd(shape, 1, position.data());
EXPECT_THAT(position, ElementsAre(9));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(6));
}
TEST(ResetBufferPositionTest, TwoDimensional) {
std::vector<Index> shape{10, 15};
std::vector<Index> position{42, 43};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 11));
}
TEST(ResetBufferPositionTest, ThreeDimensional) {
std::vector<Index> shape{10, 15, 19};
std::vector<Index> position{42, 43, 44};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 14, 15));
}
TEST(StepBufferPositionForwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(10));
}
TEST(StepBufferPositionForwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{0, 0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 8));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0));
}
TEST(StepBufferPositionForwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 6};
std::vector<Index> position{0, 0, 0};
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 4));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0, 0));
}
TEST(StepBufferPositionBackwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
}
TEST(StepBufferPositionBackwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
}
TEST(StepBufferPositionBackwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 10};
std::vector<Index> position{1, 1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
}
TEST(NDIterationPositionStepperTest, Forward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{0, 0, 0}, 4}, {{0, 0, 4}, 3},
{{0, 1, 0}, 4}, {{0, 1, 4}, 3},
{{0, 2, 0}, 4}, {{0, 2, 4}, 3},
{{1, 0, 0}, 4}, {{1, 0, 4}, 3},
{{1, 1, 0}, 4}, {{1, 1, 4}, 3},
{{1, 2, 0}, 4}, {{1, 2, 4}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtBeginning(); block_size;
block_size = stepper.StepForward(block_size)) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
TEST(NDIterationPositionStepperTest, Backward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{1, 2, 3}, 4}, {{1, 2, 0}, 3},
{{1, 1, 3}, 4}, {{1, 1, 0}, 3},
{{1, 0, 3}, 4}, {{1, 0, 0}, 3},
{{0, 2, 3}, 4}, {{0, 2, 0}, 3},
{{0, 1, 3}, 4}, {{0, 1, 0}, 3},
{{0, 0, 3}, 4}, {{0, 0, 0}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtEnd(); block_size;
block_size = stepper.StepBackward()) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
} |
629 | cpp | google/tensorstore | irregular_grid | tensorstore/internal/irregular_grid.cc | tensorstore/internal/irregular_grid_test.cc | #ifndef TENSORSTORE_INTERNAL_IRREGULAR_GRID_H_
#define TENSORSTORE_INTERNAL_IRREGULAR_GRID_H_
#include <assert.h>
#include <vector>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
class IrregularGrid {
public:
IrregularGrid() = default;
IrregularGrid(std::vector<std::vector<Index>> unsorted_inclusive_mins);
static IrregularGrid Make(span<const IndexDomainView<>> domains);
static IrregularGrid Make(span<const IndexDomain<>> domains);
Index operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const;
DimensionIndex rank() const { return shape_.size(); }
span<const Index> shape() const { return shape_; }
span<const Index> inclusive_min(DimensionIndex r) const {
assert(r >= 0);
assert(r < rank());
return inclusive_mins_[r];
}
std::vector<Index> cell_origin(span<const Index> indices) const {
assert(indices.size() == rank());
std::vector<Index> origin;
origin.reserve(rank());
for (size_t i = 0; i < indices.size(); i++) {
auto x = indices[i];
if (x < 0) {
origin.push_back(-kInfIndex);
} else if (x >= shape_[i]) {
origin.push_back(kInfIndex);
} else {
origin.push_back(inclusive_mins_[i][x]);
}
}
return origin;
}
private:
std::vector<Index> shape_;
std::vector<std::vector<Index>> inclusive_mins_;
};
}
}
#endif
#include "tensorstore/internal/irregular_grid.h"
#include <assert.h>
#include <stddef.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
IrregularGrid::IrregularGrid(std::vector<std::vector<Index>> inclusive_mins)
: shape_(inclusive_mins.size(), 0),
inclusive_mins_(std::move(inclusive_mins)) {
for (size_t i = 0; i < inclusive_mins_.size(); i++) {
std::sort(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
auto new_it =
std::unique(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
inclusive_mins_[i].resize(
std::distance(inclusive_mins_[i].begin(), new_it));
shape_[i] = inclusive_mins_[i].size() - 1;
}
}
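// Maps `output_index` to the grid cell index along dimension `dim` that
// contains it.  Returns -1 for indices below the first boundary and
// `shape()[dim]` for indices at or beyond the last boundary; if `cell_bounds`
// is non-null, it is set to the interval covered by the returned cell.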
Index IrregularGrid::operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const {
auto points = inclusive_min(dim);
auto it = std::upper_bound(points.begin(), points.end(), output_index);
Index cell = std::distance(points.begin(), it) - 1;
if (cell_bounds) {
if (cell < 0) {
*cell_bounds = IndexInterval::UncheckedHalfOpen(-kInfIndex, points[0]);
} else if (cell < points.size() - 1) {
*cell_bounds =
IndexInterval::UncheckedHalfOpen(points[cell], points[cell + 1]);
} else {
*cell_bounds = IndexInterval::UncheckedClosed(points[cell], kInfIndex);
}
}
return cell;
}
IrregularGrid IrregularGrid::Make(span<const IndexDomain<>> domains) {
absl::InlinedVector<IndexDomainView<>, 16> views;
views.reserve(domains.size());
for (const auto& d : domains) views.push_back(d);
return Make(span(views));
}
IrregularGrid IrregularGrid::Make(span<const IndexDomainView<>> domains) {
assert(!domains.empty());
DimensionIndex rank = domains[0].rank();
std::vector<std::vector<Index>> inclusive_mins;
inclusive_mins.resize(rank);
for (auto& d : domains) {
assert(d.rank() == rank);
for (DimensionIndex i = 0; i < rank; i++) {
if (inclusive_mins[i].empty() ||
inclusive_mins[i].back() != d[i].inclusive_min()) {
inclusive_mins[i].push_back(d[i].inclusive_min());
}
inclusive_mins[i].push_back(d[i].exclusive_max());
}
}
return IrregularGrid(std::move(inclusive_mins));
}
}
} | #include "tensorstore/internal/irregular_grid.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexInterval;
using ::tensorstore::kInfIndex;
using ::tensorstore::span;
using ::tensorstore::internal::IrregularGrid;
using ::testing::ElementsAre;
TEST(IrregularGridTest, Basic) {
std::vector<Index> dimension0{2, 0, -3};
std::vector<Index> dimension1{10, 45, 20, 30};
auto grid = IrregularGrid({dimension0, dimension1});
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, IndexDomain) {
const Index origin1[] = {-3, 10};
const Index shape1[] = {3, 10};
const Index origin2[] = {0, 20};
const Index shape2[] = {2, 10};
const Index origin3[] = {0, 30};
const Index shape3[] = {2, 15};
std::vector<IndexDomain<>> domains(
{IndexDomain<>{BoxView<>{span(origin1), span(shape1)}},
IndexDomain<>{BoxView<>{span(origin2), span(shape2)}},
IndexDomain<>{BoxView<>{span(origin3), span(shape3)}}});
auto grid = IrregularGrid::Make(domains);
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, Rank0) {
std::vector<std::vector<Index>> inclusive_mins;
auto grid = IrregularGrid(inclusive_mins);
EXPECT_EQ(0, grid.rank());
EXPECT_TRUE(grid.shape().empty());
EXPECT_TRUE(grid.cell_origin({}).empty());
}
} |
630 | cpp | google/tensorstore | nditerable_copy | tensorstore/internal/nditerable_copy.cc | tensorstore/internal/nditerable_copy_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_COPY_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_COPY_H_
#include <array>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
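// Pairs an input and output NDIterable for copying.  GetBufferParameters
// determines the BufferSource, i.e. whether the copy goes through blocks
// provided by both iterables, through a block provided by only one of them,
// or through an externally allocated buffer.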
struct NDIterableCopyManager
: public CompositeNDIterableLayoutConstraint<
std::array<const NDIterable*, 2>, NDIterableLayoutConstraint> {
using Base =
CompositeNDIterableLayoutConstraint<std::array<const NDIterable*, 2>,
NDIterableLayoutConstraint>;
enum class BufferSource {
kBoth,
kInput,
kOutput,
kExternal,
};
struct BufferParameters {
BufferSource buffer_source;
IterationBufferKind input_buffer_kind;
IterationBufferKind output_buffer_kind;
};
NDIterableCopyManager(const NDIterable* input, const NDIterable* output);
BufferParameters GetBufferParameters(
NDIterable::IterationLayoutView layout) const;
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout) const;
const NDIterable* input() const { return this->iterables[0]; }
const NDIterable* output() const { return this->iterables[1]; }
};
struct NDIteratorCopyManager {
public:
NDIteratorCopyManager(const NDIterableCopyManager& iterable,
NDIterable::IterationBufferLayoutView layout,
ArenaAllocator<> allocator);
bool Copy(span<const Index> indices, IterationBufferShape block_shape,
absl::Status* status) {
return copy_impl_(this, indices, block_shape, status);
}
private:
NDIterator::Ptr input_;
NDIterator::Ptr output_;
using CopyImpl = bool (*)(NDIteratorCopyManager* self,
span<const Index> indices,
IterationBufferShape block_shape,
absl::Status* status);
CopyImpl copy_impl_;
SpecializedElementwiseFunctionPointer<2, void*> copy_elements_function_;
NDIteratorExternalBufferManager<1, 2> buffer_manager_;
};
struct NDIterableCopier {
NDIterableCopier(const NDIterable& input, const NDIterable& output,
span<const Index> shape, IterationConstraints constraints,
Arena* arena);
NDIterableCopier(const NDIterable& input, const NDIterable& output,
span<const Index> shape, Arena* arena)
: NDIterableCopier(input, output, shape, skip_repeated_elements, arena) {}
absl::Status Copy();
const NDIterationLayoutInfo<>& layout_info() const { return layout_info_; }
span<const Index> position() const {
return span<const Index>(position_, layout_info_.iteration_shape.size());
}
NDIteratorCopyManager& iterator_copy_manager() {
return iterator_copy_manager_;
}
private:
NDIterableCopier(const NDIterableCopyManager& iterable_copy_manager,
span<const Index> shape, IterationConstraints constraints,
Arena* arena);
NDIterationLayoutInfo<> layout_info_;
IterationBufferShape block_shape_;
Index position_[kMaxRank];
NDIteratorCopyManager iterator_copy_manager_;
};
}
}
#endif
#include "tensorstore/internal/nditerable_copy.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
NDIterableCopyManager::NDIterableCopyManager(const NDIterable* input,
const NDIterable* output)
: Base{{{input, output}}} {
assert(input->dtype() == output->dtype());
}
NDIterableCopyManager::BufferParameters
NDIterableCopyManager::GetBufferParameters(
NDIterable::IterationLayoutView layout) const {
BufferParameters result;
auto input_constraint = input()->GetIterationBufferConstraint(layout);
auto output_constraint = output()->GetIterationBufferConstraint(layout);
if (!input_constraint.external || !output_constraint.external) {
result.input_buffer_kind = result.output_buffer_kind = std::max(
input_constraint.min_buffer_kind, output_constraint.min_buffer_kind);
} else {
result.input_buffer_kind = input_constraint.min_buffer_kind;
result.output_buffer_kind = output_constraint.min_buffer_kind;
}
result.buffer_source =
input_constraint.external
? (output_constraint.external ? BufferSource::kExternal
: BufferSource::kOutput)
: (output_constraint.external ? BufferSource::kInput
: BufferSource::kBoth);
return result;
}
std::ptrdiff_t NDIterableCopyManager::GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout) const {
auto buffer_parameters = GetBufferParameters(layout);
std::ptrdiff_t num_bytes = 0;
num_bytes += input()->GetWorkingMemoryBytesPerElement(
layout, buffer_parameters.input_buffer_kind);
num_bytes += output()->GetWorkingMemoryBytesPerElement(
layout, buffer_parameters.output_buffer_kind);
if (buffer_parameters.buffer_source == BufferSource::kExternal) {
num_bytes += input()->dtype()->size;
if (std::max(buffer_parameters.input_buffer_kind,
buffer_parameters.output_buffer_kind) ==
IterationBufferKind::kIndexed) {
num_bytes += sizeof(Index);
}
}
return num_bytes;
}
NDIteratorCopyManager::NDIteratorCopyManager(
const NDIterableCopyManager& iterable,
NDIterable::IterationBufferLayoutView layout, ArenaAllocator<> allocator)
: buffer_manager_(allocator) {
auto buffer_parameters = iterable.GetBufferParameters(layout);
input_ = iterable.input()->GetIterator(
{layout, buffer_parameters.input_buffer_kind});
output_ = iterable.output()->GetIterator(
{layout, buffer_parameters.output_buffer_kind});
switch (buffer_parameters.buffer_source) {
case NDIterableCopyManager::BufferSource::kBoth:
copy_elements_function_ =
iterable.input()
->dtype()
->copy_assign[buffer_parameters.input_buffer_kind];
break;
case NDIterableCopyManager::BufferSource::kExternal:
buffer_manager_.Initialize(layout.block_shape,
{{iterable.input()->dtype()}},
{{{{buffer_parameters.input_buffer_kind,
buffer_parameters.output_buffer_kind}}}});
break;
default:
break;
}
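  // One copy implementation per BufferSource value, in enum order:
  // kBoth:     obtain separate input/output blocks and copy elementwise.
  // kInput:    the input's block serves directly as the output's external
  //            buffer.
  // kOutput:   the output's block is filled directly by the input iterator.
  // kExternal: both sides use a separately allocated buffer managed by
  //            `buffer_manager_`.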
constexpr static CopyImpl kCopyImpls[] = {
[](NDIteratorCopyManager* self, span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) -> bool {
IterationBufferPointer input_pointer, output_pointer;
return self->input_->GetBlock(indices, block_shape, &input_pointer,
status) &&
self->output_->GetBlock(indices, block_shape, &output_pointer,
status) &&
self->copy_elements_function_(nullptr, block_shape,
input_pointer, output_pointer,
status) &&
self->output_->UpdateBlock(indices, block_shape, output_pointer,
status);
},
[](NDIteratorCopyManager* self, span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) -> bool {
IterationBufferPointer pointer;
return self->input_->GetBlock(indices, block_shape, &pointer, status) &&
self->output_->GetBlock(indices, block_shape, &pointer,
status) &&
self->output_->UpdateBlock(indices, block_shape, pointer,
status);
},
[](NDIteratorCopyManager* self, span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) -> bool {
IterationBufferPointer pointer;
return self->output_->GetBlock(indices, block_shape, &pointer,
status) &&
self->input_->GetBlock(indices, block_shape, &pointer, status) &&
self->output_->UpdateBlock(indices, block_shape, pointer,
status);
},
[](NDIteratorCopyManager* self, span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) -> bool {
return self->input_->GetBlock(
indices, block_shape,
&self->buffer_manager_.buffer_pointers()[0][0], status) &&
self->output_->GetBlock(
indices, block_shape,
&self->buffer_manager_.buffer_pointers()[1][0], status) &&
self->output_->UpdateBlock(
indices, block_shape,
self->buffer_manager_.buffer_pointers()[1][0], status);
},
};
copy_impl_ = kCopyImpls[static_cast<int>(buffer_parameters.buffer_source)];
}
NDIterableCopier::NDIterableCopier(const NDIterable& input,
const NDIterable& output,
span<const Index> shape,
IterationConstraints constraints,
Arena* arena)
: NDIterableCopier(NDIterableCopyManager(&input, &output), shape,
constraints, arena) {}
NDIterableCopier::NDIterableCopier(
const NDIterableCopyManager& iterable_copy_manager, span<const Index> shape,
IterationConstraints constraints, Arena* arena)
: layout_info_(iterable_copy_manager, shape, constraints),
block_shape_(GetNDIterationBlockShape(
iterable_copy_manager.GetWorkingMemoryBytesPerElement(
layout_info_.layout_view()),
layout_info_.iteration_shape)),
iterator_copy_manager_(iterable_copy_manager,
{layout_info_.layout_view(), block_shape_},
arena) {}
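// Copies the entire iteration domain block by block, returning the first
// element-copy error encountered (if any).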
absl::Status NDIterableCopier::Copy() {
span<const Index> iteration_shape = layout_info_.iteration_shape;
std::fill_n(position_, iteration_shape.size(), static_cast<Index>(0));
if (layout_info_.empty) {
return absl::OkStatus();
}
absl::Status copy_status;
if (Index inner_block_size = block_shape_[1];
inner_block_size != iteration_shape.back()) {
assert(block_shape_[0] == 1);
for (Index block_size = inner_block_size; block_size;) {
if (!iterator_copy_manager_.Copy(
span<const Index>(position_, iteration_shape.size()),
{1, block_size}, ©_status)) {
return GetElementCopyErrorStatus(std::move(copy_status));
}
block_size = StepBufferPositionForward(iteration_shape, block_size,
inner_block_size, position_);
}
} else {
const Index outer_block_size = block_shape_[0];
for (Index block_size = outer_block_size; block_size;) {
if (!iterator_copy_manager_.Copy(
span<const Index>(position_, iteration_shape.size()),
{block_size, inner_block_size}, ©_status)) {
return GetElementCopyErrorStatus(std::move(copy_status));
}
block_size = StepBufferPositionForward(
iteration_shape.first(iteration_shape.size() - 1), block_size,
outer_block_size, position_);
}
}
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/nditerable_copy.h"
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::Shared;
using ::tensorstore::internal::GetElementwiseInputTransformNDIterable;
using ::tensorstore::internal::GetElementwiseOutputTransformNDIterable;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
TEST(NDIterableCopyTest, Example) {
auto source_array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest_array = tensorstore::AllocateArray<int>(
{2, 3}, tensorstore::c_order, tensorstore::value_init);
auto dest_element_transform = [](const int* source, int* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 5) {
*status = absl::UnknownError("5");
return false;
}
*dest = *source;
return true;
};
tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
tensorstore::internal::SimpleElementwiseFunction<
decltype(dest_element_transform)(const int, int),
void*>::Closure(&dest_element_transform);
tensorstore::internal::Arena arena;
auto source_iterable =
GetTransformedArrayNDIterable(source_array, &arena).value();
auto dest_iterable = GetElementwiseOutputTransformNDIterable(
GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<int>,
dest_closure, &arena);
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *dest_iterable, dest_array.shape(),
tensorstore::c_order, &arena);
EXPECT_EQ(absl::UnknownError("5"), copier.Copy());
EXPECT_EQ(MakeArray<int>({{1, 2, 3}, {4, 0, 0}}), dest_array);
}
template <typename IntermediateElement, typename SourceArray,
typename SourceElementTransform, typename DestElementTransform,
typename DestArray>
absl::Status TestCopy(tensorstore::IterationConstraints constraints,
SourceArray source_array,
SourceElementTransform source_element_transform,
DestElementTransform dest_element_transform,
DestArray dest_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<2, void*> source_closure =
tensorstore::internal::SimpleElementwiseFunction<
SourceElementTransform(typename SourceArray::Element,
IntermediateElement),
void*>::Closure(&source_element_transform);
tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
tensorstore::internal::SimpleElementwiseFunction<
DestElementTransform(IntermediateElement,
typename DestArray::Element),
void*>::Closure(&dest_element_transform);
auto source_iterable = GetElementwiseInputTransformNDIterable(
{{GetTransformedArrayNDIterable(source_array, &arena).value()}},
dtype_v<IntermediateElement>, source_closure, &arena);
auto dest_iterable = GetElementwiseOutputTransformNDIterable(
GetTransformedArrayNDIterable(dest_array, &arena).value(),
dtype_v<IntermediateElement>, dest_closure, &arena);
return tensorstore::internal::NDIterableCopier(
*source_iterable, *dest_iterable, dest_array.shape(), constraints,
&arena)
.Copy();
}
TEST(NDIterableCopyTest, ExternalBuffer) {
for (const bool indexed_source : {false, true}) {
for (const bool indexed_dest : {false, true}) {
SCOPED_TRACE(absl::StrCat("indexed_source=", indexed_source,
", indexed_dest=", indexed_dest)
.c_str());
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
tensorstore::TransformedArray<Shared<const int>> tsource = source;
if (indexed_source) {
tsource = (source |
tensorstore::Dims(0, 1).OuterIndexArraySlice(
MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
.value();
}
auto dest = tensorstore::AllocateArray<double>(source.shape());
tensorstore::TransformedArray<Shared<double>> tdest = dest;
if (indexed_dest) {
tdest =
(dest | tensorstore::Dims(0, 1).OuterIndexArraySlice(
MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
.value();
}
EXPECT_EQ(absl::OkStatus(),
(TestCopy<unsigned int>(
{}, tsource,
[](const int* source, unsigned int* dest, void* status) {
*dest = *source * 2;
},
[](const unsigned int* source, double* dest, void* status) {
*dest = *source + 100.0;
},
tdest)));
EXPECT_EQ(tensorstore::MakeArray<double>(
{{102.0, 104.0, 106.0}, {108.0, 110.0, 112.0}}),
dest);
}
}
}
class MaybeUnitBlockSizeTest : public ::testing::TestWithParam<bool> {
public:
MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
tensorstore::internal::SetNDIterableTestUnitBlockSize(GetParam());
#endif
}
~MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
tensorstore::internal::SetNDIterableTestUnitBlockSize(false);
#endif
}
};
INSTANTIATE_TEST_SUITE_P(NormalBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(false));
#ifndef NDEBUG
INSTANTIATE_TEST_SUITE_P(UnitBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(true));
#endif
TEST_P(MaybeUnitBlockSizeTest, InnerIndexArray) {
constexpr size_t length = 5000;
auto source = tensorstore::AllocateArray<int>({length});
auto dest = tensorstore::AllocateArray<int>({length});
auto expected = tensorstore::AllocateArray<int>({length});
auto indices = tensorstore::AllocateArray<int64_t>({length});
for (int i = 0; i < length; ++i) {
source(i) = -i;
dest(i) = 42;
indices(i) = length - 1 - i;
expected(i) = -(length - 1 - i);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::TransformedArray<Shared<const int>> tsource,
source | tensorstore::Dims(0).IndexArraySlice(indices));
tensorstore::TransformedArray<Shared<int>> tdest = dest;
tensorstore::internal::Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto source_iterable, GetTransformedArrayNDIterable(tsource, &arena));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto dest_iterable, GetTransformedArrayNDIterable(tdest, &arena));
TENSORSTORE_ASSERT_OK(tensorstore::internal::NDIterableCopier(
*source_iterable, *dest_iterable, dest.shape(),
{}, &arena)
.Copy());
EXPECT_EQ(expected, dest);
}
} |
631 | cpp | google/tensorstore | grid_chunk_key_ranges | tensorstore/internal/grid_chunk_key_ranges.cc | tensorstore/internal/grid_chunk_key_ranges_test.cc | #ifndef TENSORSTORE_INTERNAL_GRID_CHUNK_KEY_RANGES_H_
#define TENSORSTORE_INTERNAL_GRID_CHUNK_KEY_RANGES_H_
#include <string>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_grid_partition {
class IndexTransformGridPartition;
}
namespace internal {
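// Computes the chunk keys and key ranges corresponding to the grid cells
// intersected by `transform`.  `handle_key` is invoked for individual grid
// cells and `handle_key_range` for contiguous runs of cells whose keys form
// a lexicographically contiguous range.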
absl::Status GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
const internal_grid_partition::IndexTransformGridPartition& grid_partition,
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
internal_grid_partition::OutputToGridCellFn output_to_grid_cell,
BoxView<> grid_bounds,
const LexicographicalGridIndexKeyFormatter& key_formatter,
absl::FunctionRef<absl::Status(std::string key,
span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range);
}
}
#endif
#include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
absl::Status GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
const internal_grid_partition::IndexTransformGridPartition& grid_partition,
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
internal_grid_partition::OutputToGridCellFn output_to_grid_cell,
BoxView<> grid_bounds,
const LexicographicalGridIndexKeyFormatter& key_formatter,
absl::FunctionRef<absl::Status(std::string key,
span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
Box<dynamic_rank(kMaxRank)> grid_bounds_copy(grid_bounds);
assert(grid_output_dimensions.size() == grid_bounds.rank());
DimensionIndex cached_min_grid_index_for_lexicographical_order_dim = -1;
Index cached_min_grid_index_for_lexicographical_order;
const auto get_min_grid_index_for_lexicographical_order =
[&](DimensionIndex dim) {
if (dim == cached_min_grid_index_for_lexicographical_order_dim) {
return cached_min_grid_index_for_lexicographical_order;
}
cached_min_grid_index_for_lexicographical_order_dim = dim;
return cached_min_grid_index_for_lexicographical_order =
key_formatter.MinGridIndexForLexicographicalOrder(
dim, grid_bounds[dim]);
};
const auto forward_bounds =
[&](BoxView<> bounds, DimensionIndex outer_prefix_rank) -> absl::Status {
if (bounds.num_elements() == 1) {
return handle_key(key_formatter.FormatKey(bounds.origin()),
bounds.origin());
}
assert(outer_prefix_rank < bounds.rank());
if (bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return handle_key_range(KeyRange::Prefix(key_formatter.FormatKey(
bounds.origin().first(outer_prefix_rank))),
bounds);
}
DimensionIndex key_dims = outer_prefix_rank + 1;
Index inclusive_max_indices[kMaxRank];
for (DimensionIndex i = 0; i < key_dims; ++i) {
inclusive_max_indices[i] = bounds[i].inclusive_max();
}
return handle_key_range(
KeyRange(key_formatter.FormatKey(bounds.origin().first(key_dims)),
KeyRange::PrefixExclusiveMax(key_formatter.FormatKey(
span<const Index>(&inclusive_max_indices[0], key_dims)))),
bounds);
};
const auto handle_interval = [&](BoxView<> bounds) -> absl::Status {
DimensionIndex outer_prefix_rank = 0;
while (outer_prefix_rank < bounds.rank() &&
bounds.shape()[outer_prefix_rank] == 1) {
++outer_prefix_rank;
}
if (outer_prefix_rank == bounds.rank() ||
bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
const Index min_index_for_lexicographical_order =
get_min_grid_index_for_lexicographical_order(outer_prefix_rank);
if (min_index_for_lexicographical_order <=
bounds.origin()[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
Box<dynamic_rank(kMaxRank)> new_bounds(bounds);
IndexInterval inner_interval = bounds[outer_prefix_rank];
while (!inner_interval.empty() && inner_interval.inclusive_min() <
min_index_for_lexicographical_order) {
new_bounds[outer_prefix_rank] =
IndexInterval::UncheckedSized(inner_interval.inclusive_min(), 1);
TENSORSTORE_RETURN_IF_ERROR(
forward_bounds(new_bounds, outer_prefix_rank + 1));
inner_interval = IndexInterval::UncheckedClosed(
inner_interval.inclusive_min() + 1, inner_interval.inclusive_max());
}
if (inner_interval.empty()) return absl::OkStatus();
new_bounds[outer_prefix_rank] = inner_interval;
return forward_bounds(new_bounds, inner_interval.size() == 1
? outer_prefix_rank + 1
: outer_prefix_rank);
};
return internal_grid_partition::GetGridCellRanges(
grid_partition, grid_output_dimensions, grid_bounds, output_to_grid_cell,
transform, handle_interval);
}
}
} | #include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::CeilOfRatio;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::KeyRange;
using ::tensorstore::kMaxRank;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
using ::testing::Optional;
using R = std::tuple<KeyRange, Box<>>;
absl::Status GetChunkKeyRangesForRegularGridWithBase10Keys(
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
span<const Index> chunk_shape, span<const Index> shape,
char dimension_separator,
absl::FunctionRef<absl::Status(std::string key,
span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
const DimensionIndex rank = grid_output_dimensions.size();
assert(rank == chunk_shape.size());
assert(rank == shape.size());
Box<dynamic_rank(kMaxRank)> grid_bounds(rank);
for (DimensionIndex i = 0; i < shape.size(); ++i) {
const Index grid_size = CeilOfRatio(shape[i], chunk_shape[i]);
grid_bounds[i] = IndexInterval::UncheckedSized(0, grid_size);
}
RegularGridRef grid{chunk_shape};
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, grid_partition));
return GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
grid_partition, transform, grid_output_dimensions, grid, grid_bounds,
Base10LexicographicalGridIndexKeyParser{rank, dimension_separator},
handle_key, handle_key_range);
}
Result<std::vector<R>> GetRanges(
IndexTransformView<> transform,
span<const DimensionIndex> grid_output_dimensions,
span<const Index> chunk_shape, span<const Index> shape,
char dimension_separator) {
std::vector<R> ranges;
const auto handle_key = [&](std::string key,
span<const Index> grid_indices) -> absl::Status {
ranges.emplace_back(
KeyRange::Singleton(key),
Box<>(grid_indices, std::vector<Index>(grid_indices.size(), 1)));
return absl::OkStatus();
};
const auto handle_key_range = [&](KeyRange key_range,
BoxView<> grid_bounds) -> absl::Status {
ranges.emplace_back(std::move(key_range), grid_bounds);
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(GetChunkKeyRangesForRegularGridWithBase10Keys(
transform, grid_output_dimensions, chunk_shape, shape,
dimension_separator, handle_key, handle_key_range));
return ranges;
}
TEST(ChunkKeyRangesTest, Rank0) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(0, 0).Finalize().value(),
{}, {},
{}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("0"), {}})));
}
TEST(ChunkKeyRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange(), Box<>{{0}, {10}}})));
}
TEST(ChunkKeyRangesTest, Rank1Constrained) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange("1", KeyRange::PrefixExclusiveMax("7")),
Box<>{{1}, {7}}})));
}
TEST(ChunkKeyRangesTest, Rank1ConstrainedSplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({8})
.input_exclusive_max({13})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{1}},
{{20}}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("8"), Box<>{{8}, {1}}},
R{KeyRange::Singleton("9"), Box<>{{9}, {1}}},
R{KeyRange("10", KeyRange::PrefixExclusiveMax("12")),
Box<>{{10}, {3}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 10}},
{{25, 100}}, '/'),
Optional(
ElementsAre(R{KeyRange("1/0", KeyRange::PrefixExclusiveMax("1/3")),
Box<>{{1, 0}, {1, 4}}},
R{KeyRange("2/0", KeyRange::PrefixExclusiveMax("2/3")),
Box<>{{2, 0}, {1, 4}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 0})
.input_shape({8, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(ElementsAre(R{KeyRange("1/", KeyRange::PrefixExclusiveMax("2/")),
Box<>{{1, 0}, {2, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnlySplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({8, 0})
.input_shape({5, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{1, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange::Prefix("8/"), Box<>{{8, 0}, {1, 10}}},
R{KeyRange::Prefix("9/"), Box<>{{9, 0}, {1, 10}}},
R{KeyRange("10/", "120"), Box<>{{10, 0}, {3, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedSecondDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({25, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange("0/1", KeyRange::PrefixExclusiveMax("0/7")),
Box<>{{0, 1}, {1, 7}}},
R{KeyRange("1/1", KeyRange::PrefixExclusiveMax("1/7")),
Box<>{{1, 1}, {1, 7}}},
R{KeyRange("2/1", KeyRange::PrefixExclusiveMax("2/7")),
Box<>{{2, 1}, {1, 7}}},
R{KeyRange("3/1", KeyRange::PrefixExclusiveMax("3/7")),
Box<>{{3, 1}, {1, 7}}},
R{KeyRange("4/1", KeyRange::PrefixExclusiveMax("4/7")),
Box<>{{4, 1}, {1, 7}}})));
}
} |
632 | cpp | google/tensorstore | multi_barrier | tensorstore/internal/multi_barrier.cc | tensorstore/internal/multi_barrier_test.cc | #ifndef TENSORSTORE_INTERNAL_MULTI_BARRIER_H_
#define TENSORSTORE_INTERNAL_MULTI_BARRIER_H_
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
namespace tensorstore {
namespace internal {
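// Reusable barrier: `Block` waits until `num_threads` threads have called it,
// then releases them all.  Exactly one of the released threads receives a
// return value of `true` per barrier cycle.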
class MultiBarrier {
public:
explicit MultiBarrier(int num_threads);
~MultiBarrier();
bool Block();
private:
absl::Mutex lock_;
int blocking_[2] ABSL_GUARDED_BY(lock_);
int asleep_ ABSL_GUARDED_BY(lock_);
int num_threads_ ABSL_GUARDED_BY(lock_);
};
}
}
#endif
#include "tensorstore/internal/multi_barrier.h"
#include <cassert>
namespace tensorstore {
namespace internal {
namespace {
bool IsZero(void* arg) { return *reinterpret_cast<int*>(arg) == 0; }
}
MultiBarrier::MultiBarrier(int num_threads)
: blocking_{num_threads, 0}, asleep_(0), num_threads_(num_threads << 1) {
assert(num_threads > 0);
}
MultiBarrier::~MultiBarrier() {
absl::MutexLock l(&lock_);
lock_.Await(absl::Condition(IsZero, &asleep_));
}
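// The low bit of `num_threads_` selects which of the two `blocking_` counters
// is active, so the barrier can immediately begin the next cycle while
// threads from the previous cycle are still waking up.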
bool MultiBarrier::Block() {
absl::MutexLock l(&lock_);
int& num_to_block = blocking_[num_threads_ & 1];
num_to_block--;
assert(num_to_block >= 0);
if (num_to_block == 0) {
int num_threads = num_threads_ >> 1;
num_threads_ ^= 1;
blocking_[num_threads_ & 1] = num_threads;
asleep_ = num_threads;
} else {
lock_.Await(absl::Condition(IsZero, &num_to_block));
}
asleep_--;
return asleep_ == 0;
}
}
} | #include "tensorstore/internal/multi_barrier.h"
#include <atomic>
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorstore/internal/thread/thread.h"
namespace internal = tensorstore::internal;
namespace {
template <typename T>
struct MultiBarrierFixture : public ::testing::Test {};
using NumThreadTypes = ::testing::Types<std::integral_constant<int, 1>,
std::integral_constant<int, 2>,
std::integral_constant<int, 16>>;
TYPED_TEST_SUITE(MultiBarrierFixture, NumThreadTypes);
TYPED_TEST(MultiBarrierFixture, Example) {
constexpr int kIterations = 1000;
constexpr int kNumThreads = TypeParam{}();
internal::MultiBarrier barrier(kNumThreads);
std::atomic<int> winner[kNumThreads] = {};
std::atomic<int> loser[kNumThreads] = {};
internal::Thread threads[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
threads[i] = internal::Thread({"sanity"}, [&, id = i]() {
for (int j = 0; j < kIterations; j++) {
if (barrier.Block()) {
winner[id]++;
} else {
loser[id]++;
}
}
});
}
for (auto& thread : threads) {
thread.Join();
}
int sum = 0;
for (auto& x : winner) {
sum += x;
}
EXPECT_EQ(kIterations, sum);
sum = 0;
for (auto& x : loser) {
sum += x;
}
EXPECT_EQ(kIterations * (kNumThreads - 1), sum);
}
} |
633 | cpp | google/tensorstore | lock_collection | tensorstore/internal/lock_collection.cc | tensorstore/internal/lock_collection_test.cc | #ifndef TENSORSTORE_INTERNAL_LOCK_COLLECTION_H_
#define TENSORSTORE_INTERNAL_LOCK_COLLECTION_H_
#include <cassert>
#include <cstdint>
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/synchronization/mutex.h"
namespace tensorstore {
namespace internal {
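// Collection of locks to be acquired and released together, e.g. via
// `std::unique_lock<LockCollection>(collection, std::try_to_lock)`.
// Duplicate registrations of the same lock are coalesced, with an exclusive
// registration taking precedence over shared registrations of that lock.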
class ABSL_LOCKABLE LockCollection {
public:
using TryLockFunction = bool (*)(void* data, bool lock);
void Register(void* data, TryLockFunction lock_function, bool shared)
ABSL_LOCKS_EXCLUDED(this) {
assert(data);
locks_.emplace_back(data, lock_function, shared);
}
void RegisterShared(absl::Mutex& mutex) ABSL_LOCKS_EXCLUDED(this) {
static_assert(alignof(absl::Mutex) >= 2);
Register(&mutex, &MutexSharedLockFunction, true);
}
void RegisterExclusive(absl::Mutex& mutex) ABSL_LOCKS_EXCLUDED(this) {
static_assert(alignof(absl::Mutex) >= 2);
Register(&mutex, &MutexExclusiveLockFunction, false);
}
bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
void unlock() ABSL_UNLOCK_FUNCTION();
void clear() ABSL_LOCKS_EXCLUDED(this);
private:
constexpr static std::uintptr_t kTagMask = 1;
constexpr static std::uintptr_t kDataPointerMask = ~kTagMask;
static bool MutexSharedLockFunction(void* mutex, bool lock);
static bool MutexExclusiveLockFunction(void* mutex, bool lock);
struct Entry {
explicit Entry(void* data, TryLockFunction lock_function, bool shared) {
tagged_pointer = reinterpret_cast<std::uintptr_t>(data);
assert(!(tagged_pointer & kTagMask));
tagged_pointer |= static_cast<std::uintptr_t>(shared);
this->lock_function = lock_function;
}
void* data() const {
return reinterpret_cast<void*>(tagged_pointer & kDataPointerMask);
}
std::uintptr_t tagged_pointer;
TryLockFunction lock_function;
};
absl::InlinedVector<Entry, 4> locks_;
};
}
}
#endif
#include "tensorstore/internal/lock_collection.h"
namespace tensorstore {
namespace internal {
bool LockCollection::MutexSharedLockFunction(void* mutex, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto& m = *static_cast<absl::Mutex*>(mutex);
if (lock) {
m.ReaderLock();
} else {
m.ReaderUnlock();
}
return true;
}
bool LockCollection::MutexExclusiveLockFunction(void* mutex, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto& m = *static_cast<absl::Mutex*>(mutex);
if (lock) {
m.WriterLock();
} else {
m.WriterUnlock();
}
return true;
}
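// Sorting by tagged pointer ensures locks are always acquired in a consistent
// (address) order, and places an exclusive registration (tag bit 0) before
// shared registrations of the same lock so that `std::unique` keeps the
// exclusive entry.  On failure, previously acquired locks are released in
// reverse order.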
bool LockCollection::try_lock() {
if (locks_.size() > 1) {
std::sort(locks_.begin(), locks_.end(), [](const Entry& a, const Entry& b) {
return a.tagged_pointer < b.tagged_pointer;
});
locks_.erase(std::unique(locks_.begin(), locks_.end(),
[](const Entry& a, const Entry& b) {
return a.data() == b.data();
}),
locks_.end());
}
size_t i = 0, size = locks_.size();
auto* locks = locks_.data();
for (; i < size; ++i) {
auto& entry = locks[i];
if (!entry.lock_function(entry.data(), true)) {
while (i > 0) {
--i;
auto& prev_entry = locks[i];
prev_entry.lock_function(prev_entry.data(), false);
}
return false;
}
}
return true;
}
void LockCollection::unlock() {
for (const auto& entry : locks_) {
entry.lock_function(entry.data(), false);
}
}
void LockCollection::clear() { locks_.clear(); }
}
} | #include "tensorstore/internal/lock_collection.h"
#include <array>
#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/testing/concurrent.h"
namespace {
using ::tensorstore::internal::LockCollection;
using ::tensorstore::internal_testing::TestConcurrent;
TEST(LockCollectionTest, Empty) {
LockCollection c;
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
}
}
TEST(LockCollectionTest, SingleShared) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleSharedDuplicate) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
c.RegisterShared(m);
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleExclusive) {
absl::Mutex m;
LockCollection c;
c.RegisterExclusive(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleExclusiveDuplicate) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
c.RegisterExclusive(m);
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, Multiple) {
absl::Mutex m[3];
LockCollection c;
c.RegisterShared(m[0]);
c.RegisterExclusive(m[0]);
c.RegisterShared(m[1]);
c.RegisterShared(m[0]);
c.RegisterShared(m[2]);
c.RegisterShared(m[1]);
c.RegisterShared(m[1]);
c.RegisterShared(m[2]);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m[0].AssertHeld();
m[1].AssertReaderHeld();
m[2].AssertReaderHeld();
}
m[0].AssertNotHeld();
m[1].AssertNotHeld();
m[2].AssertNotHeld();
}
#if !defined(_WIN32)
TEST(LockCollectionTest, MultipleConcurrentExclusive) {
constexpr static size_t kNumMutexes = 3;
absl::Mutex m[kNumMutexes];
constexpr static size_t kNumCollections = 3;
LockCollection c[kNumCollections];
std::array<int, kNumMutexes> mutex_indices;
absl::c_iota(mutex_indices, 0);
const auto RegisterFromPermutation = [&](LockCollection& lock_collection) {
for (auto i : mutex_indices) lock_collection.RegisterExclusive(m[i]);
};
RegisterFromPermutation(c[0]);
absl::c_next_permutation(mutex_indices);
RegisterFromPermutation(c[1]);
while (absl::c_next_permutation(mutex_indices)) {
c[2] = LockCollection();
RegisterFromPermutation(c[2]);
TestConcurrent<kNumCollections>(
100,
[] {},
[] {},
[&](size_t i) {
std::unique_lock<LockCollection> guard(c[i], std::try_to_lock);
ASSERT_TRUE(guard);
});
}
}
TEST(LockCollectionTest, MultipleConcurrentExclusiveShared) {
constexpr static size_t kNumMutexes = 3;
absl::Mutex m[kNumMutexes];
constexpr static size_t kNumCollections = 3;
constexpr static size_t kNumSharedCombinations = size_t(1) << kNumMutexes;
LockCollection c[kNumCollections];
std::array<int, kNumMutexes> mutex_indices;
absl::c_iota(mutex_indices, 0);
const auto RegisterFromPermutation = [&](LockCollection& lock_collection,
size_t shared_bit_vector) {
for (auto i : mutex_indices) {
      if ((shared_bit_vector >> i) & 1) {
lock_collection.RegisterShared(m[i]);
} else {
lock_collection.RegisterExclusive(m[i]);
}
}
};
RegisterFromPermutation(c[0], 0);
absl::c_next_permutation(mutex_indices);
RegisterFromPermutation(c[1], ~size_t(0));
while (absl::c_next_permutation(mutex_indices)) {
for (size_t shared_bit_vector = 0;
shared_bit_vector < kNumSharedCombinations; ++shared_bit_vector) {
c[2] = LockCollection();
RegisterFromPermutation(c[2], shared_bit_vector);
TestConcurrent<kNumCollections>(
20,
[] {},
[] {},
[&](size_t i) {
std::unique_lock<LockCollection> guard(c[i], std::try_to_lock);
EXPECT_TRUE(guard);
});
}
}
}
#endif
struct LoggingLockable;
using LockLog = std::vector<std::pair<LoggingLockable*, bool>>;
struct LoggingLockable {
LockLog& log;
bool fail;
};
TEST(LockCollectionTest, Fail) {
LockLog log;
LoggingLockable lockables[4] = {
LoggingLockable{log, false},
LoggingLockable{log, false},
LoggingLockable{log, true},
LoggingLockable{log, true},
};
constexpr auto lock_function = [](void* data, bool lock) -> bool {
auto* lockable = static_cast<LoggingLockable*>(data);
lockable->log.emplace_back(lockable, lock);
if (lock && lockable->fail) return false;
return true;
};
LockCollection c;
for (auto& lockable : lockables) {
c.Register(&lockable, lock_function, false);
}
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
EXPECT_FALSE(guard);
EXPECT_THAT(log,
::testing::ElementsAre(::testing::Pair(&lockables[0], true),
::testing::Pair(&lockables[1], true),
::testing::Pair(&lockables[2], true),
::testing::Pair(&lockables[1], false),
::testing::Pair(&lockables[0], false)));
}
} |
634 | cpp | google/tensorstore | nditerable_elementwise_output_transform | tensorstore/internal/nditerable_elementwise_output_transform.cc | tensorstore/internal/nditerable_elementwise_output_transform_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_ELEMENTWISE_OUTPUT_TRANSFORM_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_ELEMENTWISE_OUTPUT_TRANSFORM_H_
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
NDIterable::Ptr GetElementwiseOutputTransformNDIterable(
NDIterable::Ptr output, DataType input_dtype,
ElementwiseClosure<2, void*> closure, Arena* arena);
}
}
#endif
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <array>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
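// Iterator adapter for an output iterable: `UpdateBlock` applies the
// elementwise closure to the caller-supplied input block, writing the
// results into the wrapped output iterable's block before committing it.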
struct ElementwiseOutputTransformNDIterator
: public NDIterator::Base<ElementwiseOutputTransformNDIterator> {
explicit ElementwiseOutputTransformNDIterator(
const NDIterable* output, ElementwiseClosure<2, void*> closure,
NDIterable::IterationBufferKindLayoutView layout,
ArenaAllocator<> allocator)
: output_(span(&output, 1), layout, allocator),
context_(closure.context),
elementwise_function_((*closure.function)[layout.buffer_kind]) {}
ArenaAllocator<> get_allocator() const override {
return output_.get_allocator();
}
bool UpdateBlock(span<const Index> indices, IterationBufferShape block_shape,
IterationBufferPointer pointer,
absl::Status* status) override {
return output_.GetBlock(indices, block_shape, status) &&
elementwise_function_(context_, block_shape, pointer,
output_.block_pointers()[0], status) &&
output_.UpdateBlock(indices, block_shape, status);
}
NDIteratorsWithManagedBuffers<1> output_;
void* context_;
SpecializedElementwiseFunctionPointer<2, void*> elementwise_function_;
};
struct ElementwiseOutputTransformNDIterable
: public NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>> {
using Base = NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>>;
ElementwiseOutputTransformNDIterable(NDIterable::Ptr output,
DataType input_dtype,
ElementwiseClosure<2, void*> closure,
ArenaAllocator<> allocator)
: Base{{{std::move(output)}}},
input_dtype_(input_dtype),
closure_(closure),
allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
DataType dtype() const override { return input_dtype_; }
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterator>(
allocator_, this->iterables[0].get(), closure_, layout);
}
DataType input_dtype_;
ElementwiseClosure<2, void*> closure_;
ArenaAllocator<> allocator_;
};
}
NDIterable::Ptr GetElementwiseOutputTransformNDIterable(
NDIterable::Ptr output, DataType input_dtype,
ElementwiseClosure<2, void*> closure, Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterable>(
ArenaAllocator<>(arena), std::move(output), input_dtype, closure);
}
}
} | #include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
template <typename Func, typename SourceArray, typename DestArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
SourceArray source_array, DestArray dest_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<2, void*> closure =
tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element, typename DestArray::Element),
void*>::Closure(&func);
auto iterable =
tensorstore::internal::GetElementwiseOutputTransformNDIterable(
tensorstore::internal::GetTransformedArrayNDIterable(dest_array,
&arena)
.value(),
tensorstore::dtype_v<typename SourceArray::Element>, closure, &arena);
return tensorstore::internal::NDIterableCopier(
*tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value(),
*iterable, dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseOutputTransformTest, Basic) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* status) { *dest = -*source; },
{}, source, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseOutputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, source, dest),
absl::UnknownError("zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} |
635 | cpp | google/tensorstore | nditerable_array | tensorstore/internal/nditerable_array.cc | tensorstore/internal/nditerable_array_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_ARRAY_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_ARRAY_H_
#include "tensorstore/array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
namespace tensorstore {
namespace internal {
NDIterable::Ptr GetArrayNDIterable(SharedOffsetArrayView<const void> array,
Arena* arena);
}
}
#endif
#include "tensorstore/internal/nditerable_array.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
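// Computes the byte offset of the iteration origin (accounting for dimensions
// iterated in reverse) and fills `byte_strides` with the original byte strides
// permuted into iteration order, negated for reversed dimensions, and set to 0
// for dummy dimensions.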
Index ComputeIteratorBaseOffsetAndByteStrides(
NDIterable::IterationLayoutView layout, span<const Index> orig_byte_strides,
Index* byte_strides) {
assert(layout.full_rank() == orig_byte_strides.size());
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
const int dir = layout.directions[dim];
if (dir == -1) {
base_offset = wrap_on_overflow::Add(
base_offset, wrap_on_overflow::Multiply(layout.shape[dim] - 1,
orig_byte_strides[dim]));
}
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
byte_strides[i] = 0;
} else {
byte_strides[i] = orig_byte_strides[dim] * layout.directions[dim];
}
}
return base_offset;
}
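// NDIterator used for the contiguous/strided (non-indexed) buffer kinds. The
// Rank parameter selects inline storage of the iteration byte strides for the
// common ranks 2 and 3; Rank == -1 uses an arena-allocated vector instead.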
template <DimensionIndex Rank>
class StridedIteratorImpl;
template <DimensionIndex Rank = -1>
class StridedIteratorImplBase
: public NDIterator::Base<StridedIteratorImpl<Rank>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
protected:
ArenaAllocator<> allocator_;
std::array<Index, Rank> byte_strides_;
};
template <>
class StridedIteratorImplBase<-1>
: public NDIterator::Base<StridedIteratorImpl<-1>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: byte_strides_(rank, allocator) {}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
protected:
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
template <DimensionIndex Rank = -1>
class StridedIteratorImpl : public StridedIteratorImplBase<Rank> {
using Base = StridedIteratorImplBase<Rank>;
using Base::byte_strides_;
public:
StridedIteratorImpl(ByteStridedPointer<void> data,
span<const Index> orig_byte_strides,
NDIterable::IterationLayoutView layout,
ArenaAllocator<> allocator)
: Base(layout.iteration_rank(), allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, byte_strides_.data());
}
bool GetBlock(span<const Index> indices, IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
Index offset;
if constexpr (Rank == -1) {
offset = IndexInnerProduct(indices.size(), byte_strides_.data(),
indices.data());
} else {
offset = IndexInnerProduct<Rank>(byte_strides_.data(), indices.data());
}
*pointer = IterationBufferPointer{data_ + offset,
byte_strides_[byte_strides_.size() - 2],
byte_strides_[byte_strides_.size() - 1]};
return true;
}
private:
ByteStridedPointer<void> data_;
};
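// NDIterator used for IterationBufferKind::kIndexed. In addition to the
// per-dimension byte strides, it precomputes a byte-offset array covering one
// block, filled from the strides of the last two iteration dimensions.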
class IndexedIteratorImpl : public NDIterator::Base<IndexedIteratorImpl> {
public:
IndexedIteratorImpl(ByteStridedPointer<void> data,
span<const Index> orig_byte_strides,
NDIterable::IterationBufferLayoutView layout,
ArenaAllocator<> allocator)
: block_inner_size_(layout.block_shape[1]),
buffer_(layout.iteration_rank() +
layout.block_shape[0] * layout.block_shape[1],
allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, buffer_.data());
FillOffsetsArrayFromStride(buffer_[layout.iteration_rank() - 2],
buffer_[layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1],
buffer_.data() + layout.iteration_rank());
}
ArenaAllocator<> get_allocator() const override {
return buffer_.get_allocator();
}
bool GetBlock(span<const Index> indices, IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
*pointer = IterationBufferPointer{
data_ +
IndexInnerProduct(indices.size(), buffer_.data(), indices.data()),
block_inner_size_, buffer_.data() + indices.size()};
return true;
}
private:
ByteStridedPointer<void> data_;
Index block_inner_size_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
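// NDIterable implementation backed by a single strided array. Dimension
// ordering, direction preferences, and dimension combining are all derived
// from the array's byte strides.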
class ArrayIterableImpl : public NDIterable::Base<ArrayIterableImpl> {
public:
ArrayIterableImpl(SharedOffsetArrayView<const void> array,
ArenaAllocator<> allocator)
: dtype_(array.dtype()),
byte_strides_(array.byte_strides().begin(), array.byte_strides().end(),
allocator) {
void* origin_pointer =
const_cast<void*>(array.byte_strided_origin_pointer().get());
data_ = std::shared_ptr<void>(std::move(array.pointer()), origin_pointer);
}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return GetDimensionOrderFromByteStrides(byte_strides_[dim_i],
byte_strides_[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
UpdateDirectionPrefsFromByteStrides(byte_strides_, prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return CanCombineStridedArrayDimensions(
byte_strides_[dim_i], dir_i, byte_strides_[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex last_dim = layout.iteration_dimensions.back();
return {(last_dim == -1 ||
(byte_strides_[last_dim] * layout.directions[last_dim] ==
dtype_->size))
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
IterationBufferKindLayoutView layout) const override {
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
return MakeUniqueWithVirtualIntrusiveAllocator<IndexedIteratorImpl>(
get_allocator(), data_.get(), byte_strides_, layout);
}
const auto make_strided_iterator = [&](auto rank) {
return MakeUniqueWithVirtualIntrusiveAllocator<
StridedIteratorImpl<decltype(rank)::value>>(
get_allocator(), data_.get(), byte_strides_, layout);
};
switch (layout.iteration_rank()) {
#ifndef TENSORSTORE_NDITERABLE_DISABLE_ARRAY_OPTIMIZE
case 2:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 2>{});
case 3:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 3>{});
#endif
default:
assert(layout.iteration_rank() > 1);
return make_strided_iterator(
std::integral_constant<DimensionIndex, -1>{});
}
}
private:
std::shared_ptr<void> data_;
DataType dtype_;
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
}
NDIterable::Ptr GetArrayNDIterable(SharedOffsetArrayView<const void> array,
Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<ArrayIterableImpl>(
ArenaAllocator<>(arena), std::move(array));
}
}
} | #include "tensorstore/internal/nditerable_array.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::span;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using DirectionPref = NDIterable::DirectionPref;
TEST(NDIterableArrayTest, Direct) {
uint8_t data[1000];
Array<uint8_t> array(data + 500,
StridedLayout<>({6, 3, 4, 5}, {-1, -6, 0, 3}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
{
std::vector<DirectionPref> direction_prefs(4, DirectionPref::kCanSkip);
iterable->UpdateDirectionPrefs(direction_prefs.data());
EXPECT_THAT(direction_prefs,
::testing::ElementsAre(
DirectionPref::kBackward, DirectionPref::kBackward,
DirectionPref::kCanSkip, DirectionPref::kForward));
}
EXPECT_GT(iterable->GetDimensionOrder(0, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(0, 2), 0);
EXPECT_GT(iterable->GetDimensionOrder(0, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 0), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 2), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 3), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 1), 0);
  EXPECT_GT(iterable->GetDimensionOrder(2, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(3, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 2), 0);
EXPECT_TRUE(iterable->CanCombineDimensions(1, 1,
0, 1,
6));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, 1,
5));
EXPECT_TRUE(iterable->CanCombineDimensions(3, 1,
0, -1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(3, -1,
0, 1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
3, 1,
2));
{
auto c = iterable->GetIterationBufferConstraint(
{span<const Index>({6, 3, 4, 5}),
span<const int>({1, 1, 1, 1}),
span<const DimensionIndex>({0, 1, 2, 3}),
span<const Index>({6, 3, 4, 5})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{span<const Index>({6, 3, 4, 5}),
span<const int>({1, 1, 1, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kContiguous, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})},
IterationBufferKind::kContiguous));
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})},
IterationBufferKind::kStrided));
EXPECT_EQ(
sizeof(Index),
iterable->GetWorkingMemoryBytesPerElement(
{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})},
IterationBufferKind::kIndexed));
{
auto iterator = iterable->GetIterator(
{{{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kContiguous});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(span<const Index>({2, 3, 1}), {1, 3},
&pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_EQ(1, pointer.inner_byte_stride);
EXPECT_EQ(absl::OkStatus(), status);
}
{
auto iterator = iterable->GetIterator(
{{{span<const Index>({6, 3, 4, 5}),
span<const int>({-1, -1, 0, 1}),
span<const DimensionIndex>({1, 3, 0}),
span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kIndexed});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(span<const Index>({2, 3, 1}), {1, 3},
&pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_THAT(span<const Index>(pointer.byte_offsets, 3),
::testing::ElementsAre(0, 1, 2));
EXPECT_EQ(absl::OkStatus(), status);
}
}
TEST(NDIterableArrayTest, RankZero) {
auto array = tensorstore::MakeScalarArray<int>(5);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(span<const Index>{}, {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, -1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre());
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.ResetAtBeginning(), ::testing::ElementsAre(1, 1));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, 1}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
TENSORSTORE_EXPECT_OK(status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(0, multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 1}), ::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
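// When TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE is defined, block
// sizes are forced to 1, so the expected block size differs accordingly.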
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr Index ExpectedBlockSize(Index block_size) { return block_size; }
#else
constexpr Index ExpectedBlockSize(Index block_size) { return 1; }
#endif
TEST(NDIterableArrayTest, RankOne) {
auto array = tensorstore::MakeArray<int>({1, 2, 3, 4, 5});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(span<const Index>({5}), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(5)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(5)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 5}), ::testing::ElementsAre(0, 5));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoContiguous) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(6)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(6)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(6)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 6}), ::testing::ElementsAre(0, 6));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoTranspose) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::fortran_order, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({3, 2}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int) * 3,
multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({3, 2}), ::testing::ElementsAre(0, 2));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(3, 0));
}
TEST(NDIterableArrayTest, SkipSize1Dimension) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150],
StridedLayout<>({2, 1, 3}, {5, 10, -20})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 1, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0, -1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
}
TEST(NDIterableArrayTest, SkipZeroByteStride) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150], StridedLayout<>({2, 3}, {5, 0})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, FortranOrderArray) {
auto array =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, ReversedDimensions) {
auto orig_array = tensorstore::AllocateArray<int>({3, 4, 5});
auto orig_shape = orig_array.shape();
auto orig_strides = orig_array.byte_strides();
Array<int> array(
&orig_array(0, 4 - 1, 5 - 1),
StridedLayout<>({orig_shape[2], orig_shape[0], orig_shape[1]},
{-orig_strides[2], orig_strides[0], -orig_strides[1]}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5, 3, 4));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(-1, 1, -1));
EXPECT_THAT(multi_iterator.iteration_shape,
::testing::ElementsAre(1, 3 * 4 * 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_TRUE(
multi_iterator.GetBlock({1, ExpectedBlockSize(3 * 4 * 5)}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(orig_array.byte_strided_pointer(),
multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
}
TEST(NDIterableArrayTest, MultipleArrays) {
auto array_a = tensorstore::AllocateArray<int>({2, 3}, tensorstore::c_order);
auto array_b =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable_a = GetArrayNDIterable(array_a, &arena);
auto iterable_b = GetArrayNDIterable(array_b, &arena);
MultiNDIterator<2, true> multi_iterator(
array_a.shape(), tensorstore::skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({2, 3}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array_a(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(&array_b(0, 0), multi_iterator.block_pointers()[1].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_EQ(sizeof(int) * 2,
multi_iterator.block_pointers()[1].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({2, 3}), ::testing::ElementsAre(0, 3));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(2, 0));
}
} |
636 | cpp | google/tensorstore | nditerable_transformed_array | tensorstore/internal/nditerable_transformed_array.cc | tensorstore/internal/nditerable_transformed_array_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_TRANSFORMED_ARRAY_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_TRANSFORMED_ARRAY_H_
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
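// Returns an NDIterable representation of `array` as seen through the index
// transform, allocated using `arena`. Returns an error if the transform's
// output range is incompatible with the array domain.
//
// Illustrative usage (mirroring the unit tests below):
//
//   tensorstore::internal::Arena arena;
//   auto iterable =
//       GetTransformedArrayNDIterable(array, transform, &arena).value();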
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
TransformedArray<Shared<const void>> array, Arena* arena);
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
SharedOffsetArrayView<const void> array, IndexTransformView<> transform,
Arena* arena);
}
}
#endif
#include "tensorstore/internal/nditerable_transformed_array.h"
#include <cassert>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace input_dim_iter_flags =
internal_index_space::input_dimension_iteration_flags;
namespace {
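// NDIterable implementation for an array viewed through an index transform
// with array-indexed output dimensions. Transforms without index arrays are
// converted to a plain array iterable by MaybeConvertToArrayNDIterable below.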
class IterableImpl : public NDIterable::Base<IterableImpl> {
public:
IterableImpl(IndexTransform<> transform, allocator_type allocator)
: transform_(std::move(transform)),
input_dimension_flags_(transform_.input_rank(),
input_dim_iter_flags::can_skip, allocator) {}
allocator_type get_allocator() const override {
return input_dimension_flags_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return (flags_i & input_dim_iter_flags::array_indexed) ? -2 : 2;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
const int order = GetDimensionOrderFromByteStrides(
state_.index_array_byte_strides[i][dim_i],
state_.index_array_byte_strides[i][dim_j]);
if (order != 0) return order;
}
}
return GetDimensionOrderFromByteStrides(state_.input_byte_strides[dim_i],
state_.input_byte_strides[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
const DimensionIndex input_rank = transform_.input_rank();
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
UpdateDirectionPrefsFromByteStrides(
span(state_.index_array_byte_strides[i], input_rank), prefs);
}
UpdateDirectionPrefsFromByteStrides(
span(&state_.input_byte_strides[0], input_rank), prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return false;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
if (!CanCombineStridedArrayDimensions(
state_.index_array_byte_strides[i][dim_i], dir_i,
state_.index_array_byte_strides[i][dim_j], dir_j, size_j)) {
return false;
}
}
}
return CanCombineStridedArrayDimensions(
state_.input_byte_strides[dim_i], dir_i,
state_.input_byte_strides[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex penultimate_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 2];
const DimensionIndex last_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 1];
if ((last_dim == -1 || (input_dimension_flags_[last_dim] &
input_dim_iter_flags::array_indexed) == 0) &&
(penultimate_dim == -1 || (input_dimension_flags_[penultimate_dim] &
input_dim_iter_flags::array_indexed) == 0)) {
return {(last_dim == -1 || state_.input_byte_strides[last_dim] *
layout.directions[last_dim] ==
this->dtype_->size)
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
} else {
return {IterationBufferKind::kIndexed, false};
}
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<IteratorImpl>(
get_allocator(), this, layout);
}
class IteratorImpl : public NDIterator::Base<IteratorImpl> {
public:
IteratorImpl(const IterableImpl* iterable,
NDIterable::IterationBufferKindLayoutView layout,
allocator_type allocator)
: num_index_arrays_(
iterable->state_.num_array_indexed_output_dimensions),
num_index_array_iteration_dims_(0),
iterable_(iterable),
buffer_(
num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1) +
((layout.buffer_kind == IterationBufferKind::kIndexed)
? layout.block_shape[0] * layout.block_shape[1]
: 0),
allocator) {
static_assert(sizeof(Index) >= sizeof(void*));
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
ByteStridedPointer<const Index> index_array_pointer =
iterable->state_.index_array_pointers[j].get();
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
index_array_pointer +=
wrap_on_overflow::Multiply(index_array_byte_stride, size_minus_1);
}
buffer_[j] = reinterpret_cast<Index>(index_array_pointer.get());
}
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
base_offset = wrap_on_overflow::Add(
base_offset,
wrap_on_overflow::Multiply(input_byte_stride, size_minus_1));
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
for (DimensionIndex j = 0; j < num_index_arrays_ + 1; ++j) {
buffer_[num_index_arrays_ + layout.iteration_rank() * j + i] = 0;
}
} else {
const Index dir = layout.directions[dim];
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
buffer_[num_index_arrays_ + i] =
wrap_on_overflow::Multiply(input_byte_stride, dir);
if (iterable->input_dimension_flags_[dim] &
input_dim_iter_flags::array_indexed) {
num_index_array_iteration_dims_ = i + 1;
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
buffer_[num_index_arrays_ + layout.iteration_rank() * (j + 1) +
i] =
wrap_on_overflow::Multiply(index_array_byte_stride, dir);
}
}
}
}
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
Index* offsets_array =
buffer_.data() + num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1);
pointer_ =
IterationBufferPointer{iterable->state_.base_pointer + base_offset,
layout.block_shape[1], offsets_array};
if (num_index_array_iteration_dims_ + 1 < layout.iteration_rank()) {
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1], offsets_array);
}
} else {
assert(num_index_array_iteration_dims_ + 1 < layout.iteration_rank());
pointer_ = IterationBufferPointer{
iterable->state_.base_pointer + base_offset,
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1]};
}
}
allocator_type get_allocator() const override {
return buffer_.get_allocator();
}
bool GetBlock(span<const Index> indices, IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
IterationBufferPointer block_pointer = pointer_;
block_pointer.pointer += IndexInnerProduct(
indices.size(), indices.data(), buffer_.data() + num_index_arrays_);
if (num_index_array_iteration_dims_ + 1 < indices.size()) {
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index = ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j]))[IndexInnerProduct(
num_index_array_iteration_dims_, indices.data(),
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1))];
block_pointer.pointer += wrap_on_overflow::Multiply(
iterable_->state_.index_array_output_byte_strides[j], index);
}
} else {
block_pointer.byte_offsets_outer_stride = block_shape[1];
Index* offsets_array = const_cast<Index*>(block_pointer.byte_offsets);
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + indices.size() - 2],
buffer_[num_index_arrays_ + indices.size() - 1], block_shape[0],
block_shape[1], offsets_array);
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index* index_array_byte_strides =
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1);
ByteStridedPointer<const Index> index_array_pointer =
ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j])) +
IndexInnerProduct(indices.size() - 2, indices.data(),
index_array_byte_strides);
const Index output_byte_stride =
iterable_->state_.index_array_output_byte_strides[j];
const Index penultimate_index_array_byte_stride =
index_array_byte_strides[indices.size() - 2];
const Index last_index_array_byte_stride =
index_array_byte_strides[indices.size() - 1];
if (last_index_array_byte_stride == 0 &&
penultimate_index_array_byte_stride == 0) {
block_pointer.pointer += wrap_on_overflow::Multiply(
output_byte_stride, *index_array_pointer);
} else {
Index block_start0 = indices[indices.size() - 2];
Index block_start1 = indices[indices.size() - 1];
for (Index outer = 0; outer < block_shape[0]; ++outer) {
for (Index inner = 0; inner < block_shape[1]; ++inner) {
Index cur_contribution = wrap_on_overflow::Multiply(
output_byte_stride,
index_array_pointer[wrap_on_overflow::Add(
wrap_on_overflow::Multiply(
outer + block_start0,
penultimate_index_array_byte_stride),
wrap_on_overflow::Multiply(
inner + block_start1,
last_index_array_byte_stride))]);
auto& offset = offsets_array[outer * block_shape[1] + inner];
offset = wrap_on_overflow::Add(offset, cur_contribution);
}
}
}
}
}
*pointer = block_pointer;
return true;
}
private:
DimensionIndex num_index_arrays_;
DimensionIndex num_index_array_iteration_dims_;
const IterableImpl* iterable_;
IterationBufferPointer pointer_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
std::shared_ptr<const void> data_owner_;
IndexTransform<> transform_;
internal_index_space::SingleArrayIterationState state_;
DataType dtype_;
std::vector<input_dim_iter_flags::Bitmask,
ArenaAllocator<input_dim_iter_flags::Bitmask>>
input_dimension_flags_;
};
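// If the transform contributed no array-indexed output dimensions, the
// precomputed per-input-dimension byte strides describe an ordinary strided
// array, so the cheaper array-based NDIterable is used instead.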
Result<NDIterable::Ptr> MaybeConvertToArrayNDIterable(
std::unique_ptr<IterableImpl, VirtualDestroyDeleter> impl, Arena* arena) {
if (impl->state_.num_array_indexed_output_dimensions == 0) {
return GetArrayNDIterable(
SharedOffsetArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(impl->data_owner_),
impl->state_.base_pointer),
impl->dtype_),
StridedLayoutView<>(impl->transform_.input_rank(),
impl->transform_.input_shape().data(),
&impl->state_.input_byte_strides[0])),
arena);
}
return impl;
}
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
SharedOffsetArrayView<const void> array, IndexTransformView<> transform,
Arena* arena) {
if (!transform.valid()) {
return GetArrayNDIterable(array, arena);
}
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), transform);
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
array, internal_index_space::TransformAccess::rep(transform),
transform.input_origin().data(), transform.input_shape().data(),
&impl->state_, impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
TransformedArray<Shared<const void>> array, Arena* arena) {
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), std::move(array.transform()));
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
ElementPointer<const void>(array.element_pointer()),
internal_index_space::TransformAccess::rep(impl->transform_),
impl->transform_.input_origin().data(),
impl->transform_.input_shape().data(), &impl->state_,
impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
}
} | #include "tensorstore/internal/nditerable_transformed_array.h"
#include <stddef.h>
#include <array>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::skip_repeated_elements;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferShape;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::FieldsAre;
using ::testing::Pair;
using IterationTrace = std::vector<void*>;
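// Iterates over every block of `multi_iterator` and records, for each of the
// supplied iterables, the sequence of element pointers visited, along with the
// final status.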
template <typename... Element>
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
GetIterationTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
ptrdiff_t i = 0;
const auto unused = {(
[&] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
&result.first[i]);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
template <size_t N>
using BlockTrace =
std::vector<std::tuple<std::vector<Index>, IterationBufferShape,
std::array<IterationTrace, N>>>;
template <typename... Element>
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> GetBlockTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
auto& [position, shape, traces] = result.first.emplace_back();
position.assign(multi_iterator->position().begin(),
multi_iterator->position().end());
shape = block_shape;
ptrdiff_t i = 0;
const auto unused = {(
[&, traces_ptr = &traces[i]] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
traces_ptr);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
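// Parameterized fixture: when the parameter is true ("Indirect"), the array
// and transform are first combined into a TransformedArray; when false
// ("Direct"), they are passed separately to GetTransformedArrayNDIterable.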
class MaybeDirectTest : public ::testing::TestWithParam<bool> {
protected:
Arena arena;
Result<NDIterable::Ptr> GetMaybeDirectTransformedArrayNDIterable(
tensorstore::SharedOffsetArrayView<const void> array,
tensorstore::IndexTransformView<> transform) {
if (GetParam()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto transformed_array,
MakeTransformedArray(array, transform));
return GetTransformedArrayNDIterable(std::move(transformed_array),
&arena);
} else {
return GetTransformedArrayNDIterable(std::move(array), transform, &arena);
}
}
};
INSTANTIATE_TEST_SUITE_P(Indirect, MaybeDirectTest, ::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(Direct, MaybeDirectTest, ::testing::Values(false));
TEST(NDIterableTransformedArrayTest, Strided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, SingleIndexedDimension) {
Arena arena;
auto a = AllocateArray<int>({4});
auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({1, 2, 3, 0})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable->dtype());
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(1), &a(2), &a(3), &a(0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
OneStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionContiguousBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionStridedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 4});
auto ta = (a | tensorstore::Dims(2).Stride(2) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 2), &a(1, 0, 0), &a(1, 0, 2),
&a(0, 2, 0), &a(0, 2, 2), &a(1, 2, 0), &a(1, 2, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto tb =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable1 = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable2 = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable1.get(), iterable2.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
auto element_matcher = ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
});
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(element_matcher, element_matcher), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedAndReversedStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})) |
tensorstore::Dims(0).SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombine) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {2, 0}})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombinePartiallyReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, {1, -1}))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, 1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombineBothReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, -1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedVsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 2});
auto b = AllocateArray<int>({2, 3});
auto tb =
(b | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(a, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
tb.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 1), &a(1, 1)),
ElementsAre(&b(0, 0), &b(1, 0), &b(0, 2), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedWith2StridedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 2, 3});
auto ta =
(a | tensorstore::Dims(1).MoveToFront() |
tensorstore::Dims(2).OuterIndexArraySlice(MakeArray<Index>({0, 2, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(
&a(0, 0, 0), &a(0, 1, 0), &a(1, 0, 0), &a(1, 1, 0),
&a(0, 0, 2), &a(0, 1, 2), &a(1, 0, 2), &a(1, 1, 2),
&a(0, 0, 1), &a(0, 1, 1), &a(1, 0, 1), &a(1, 1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta =
(a |
tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0),
&a(1, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, FourIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, LastTwoDimsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoTransformedArrays) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto b = AllocateArray<int>({2, 3});
auto ta =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})))
.value();
auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 1, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT((GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 1), &a(0, 2),
&a(1, 0), &a(1, 1), &a(1, 2)),
ElementsAre(&b(0, 0), &b(0, 1), &b(0, 2),
&b(1, 0), &b(1, 1), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, ZeroRankIndexArray) {
Arena arena;
SharedArray<const Index> index_array{std::make_shared<Index>(3),
StridedLayout<>({5}, {0})};
int data[100];
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({5})
.output_index_array(0, sizeof(int) * 2, sizeof(int) * 4, index_array)
.Finalize());
auto iterable_a = GetTransformedArrayNDIterable(
{tensorstore::UnownedToShared(
tensorstore::ElementPointer<int>(&data[0])),
transform},
&arena)
.value();
MultiNDIterator<1, true> multi_iterator(
transform.input_shape(), skip_repeated_elements, {{iterable_a.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, -1));
EXPECT_THAT(
(GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&data[4 * 3 + 2])), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsConstant) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_constant(0, 8)
.Finalize()
.value();
EXPECT_THAT(
GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Checking bounds of constant output index map for "
"dimension 0: Index 8 is outside valid range \\[0, 5\\)"));
}
TEST(NDIterableTransformedArrayTest, NullTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a = GetTransformedArrayNDIterable(a, {}, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IdentityTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a =
GetTransformedArrayNDIterable(
a,
tensorstore::IdentityTransform(tensorstore::span<const Index>({5})),
&arena)
.value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsSingleInputDimension) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_single_input_dimension(0, 2, 1, 0)
.Finalize()
.value();
EXPECT_THAT(GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Output dimension 0 range of \\[2, 7\\) is not "
"contained within array domain of \\[0, 5\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsIndexArray) {
auto a = AllocateArray<int>({5});
auto transform =
IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, MakeArray<Index>({0, 0, 0, 0, 42}))
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsSingletonIndexArray) {
SharedArray<const Index> index_array{std::make_shared<Index>(42),
StridedLayout<>({5}, {0})};
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, index_array)
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST(NDIterableTransformedArrayTest, BlockTraceThreeStridedDimensions) {
Arena arena;
auto a = AllocateArray<int>({2, 5, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1, 2));
EXPECT_THAT(
GetBlockTrace<int>(&multi_iterator),
Pair(ElementsAre(FieldsAre(ElementsAre(0, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(0, 0, 0),
&a(0, 0, 1),
&a(0, 0, 2),
&a(0, 2, 0),
&a(0, 2, 1),
&a(0, 2, 2),
}))),
FieldsAre(ElementsAre(1, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(1, 0, 0),
&a(1, 0, 1),
&a(1, 0, 2),
&a(1, 2, 0),
&a(1, 2, 1),
&a(1, 2, 2),
})))),
absl::OkStatus()));
}
} |
637 | cpp | google/tensorstore | json_gtest | tensorstore/internal/json_gtest.cc | tensorstore/internal/json_gtest_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_GTEST_H_
#define TENSORSTORE_INTERNAL_JSON_GTEST_H_
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <nlohmann/json.hpp>
namespace nlohmann {
inline void PrintTo(json const& j, std::ostream* os) { *os << j.dump(); }
}
namespace tensorstore {
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j);
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer,
::testing::Matcher<::nlohmann::json> value_matcher);
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer, ::nlohmann::json value_matcher);
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
std::vector<std::pair<std::string, ::nlohmann::json>> matchers);
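// Example usage (illustrative sketch only, mirroring the unit test below; not
// part of the original header):
//
//   ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
//   EXPECT_THAT(obj, MatchesJson(obj));
//   EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
//   EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));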
}
#endif
#include "tensorstore/internal/json_gtest.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
class JsonMatcherImpl : public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonMatcherImpl(::nlohmann::json value) : value_(std::move(value)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
if (!internal_json::JsonSame(value_, value_untyped)) {
if (listener->IsInterested()) {
*listener << "where the difference is:\n"
<< ::nlohmann::json::diff(value_, value_untyped).dump(2);
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << "matches json " << value_;
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not match json " << value_;
}
private:
::nlohmann::json value_;
};
}
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j) {
return ::testing::MakeMatcher(new JsonMatcherImpl(std::move(j)));
}
namespace {
class JsonPointerMatcherImpl
: public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonPointerMatcherImpl(std::string sub_value_pointer,
::testing::Matcher<::nlohmann::json> sub_value_matcher)
: sub_value_pointer_(std::move(sub_value_pointer)),
sub_value_matcher_(std::move(sub_value_matcher)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
auto sub_value =
json_pointer::Dereference(value_untyped, sub_value_pointer_);
if (!sub_value.ok()) {
if (listener->IsInterested()) {
*listener << "where the pointer could not be resolved: "
<< sub_value.status();
}
return false;
}
if (listener->IsInterested()) {
::testing::StringMatchResultListener s;
if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
*listener << "whose sub value doesn't match";
auto str = s.str();
if (!str.empty()) {
*listener << ", " << str;
}
return false;
}
return true;
}
return sub_value_matcher_.Matches(**sub_value);
}
void DescribeTo(std::ostream* os) const override {
*os << "has sub value " << tensorstore::QuoteString(sub_value_pointer_)
<< " that ";
sub_value_matcher_.DescribeTo(os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not have sub value "
<< tensorstore::QuoteString(sub_value_pointer_) << " that ";
sub_value_matcher_.DescribeTo(os);
}
private:
std::string sub_value_pointer_;
::testing::Matcher<nlohmann::json> sub_value_matcher_;
};
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer,
::testing::Matcher<::nlohmann::json> value_matcher) {
return ::testing::MakeMatcher(new JsonPointerMatcherImpl(
std::move(json_pointer), std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer, ::nlohmann::json value_matcher) {
return JsonSubValueMatches(std::move(json_pointer),
MatchesJson(std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
std::vector<std::pair<std::string, ::nlohmann::json>> matchers) {
std::vector<::testing::Matcher<::nlohmann::json>> all;
all.reserve(matchers.size());
for (const auto& p : matchers) {
all.push_back(JsonSubValueMatches(p.first, p.second));
}
return ::testing::AllOfArray(all);
}
} | #include "tensorstore/internal/json_gtest.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::JsonSubValueMatches;
using ::tensorstore::JsonSubValuesMatch;
using ::tensorstore::MatchesJson;
template <typename MatcherType>
std::string Describe(const MatcherType& m) {
std::ostringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(JsonSubValueMatchesTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValueMatches("/a", 123));
EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
EXPECT_THAT(obj,
JsonSubValueMatches("/b/c", ::testing::Not(MatchesJson("xy"))));
EXPECT_THAT(Describe(JsonSubValueMatches("/a", 123)),
"has sub value \"/a\" that matches json 123");
EXPECT_THAT(Explain(JsonSubValueMatches("/a", 124), obj),
::testing::StartsWith(
"whose sub value doesn't match, where the difference is:"));
}
TEST(JsonSubValuesMatchTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
}
} |
638 | cpp | google/tensorstore | async_write_array | tensorstore/internal/async_write_array.cc | tensorstore/internal/async_write_array_test.cc | #ifndef TENSORSTORE_INTERNAL_ASYNC_WRITE_ARRAY_H_
#define TENSORSTORE_INTERNAL_ASYNC_WRITE_ARRAY_H_
#include <stddef.h>
#include <memory>
#include <utility>
#include <vector>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/masked_array.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
struct AsyncWriteArray {
explicit AsyncWriteArray(DimensionIndex rank);
AsyncWriteArray(AsyncWriteArray&& other)
: write_state(std::move(other.write_state)),
read_generation(std::move(other.read_generation)) {}
struct Spec {
SharedOffsetArray<const void> overall_fill_value;
Box<> valid_data_bounds;
bool store_if_equal_to_fill_value = false;
EqualityComparisonKind fill_value_comparison_kind =
EqualityComparisonKind::identical;
DimensionIndex rank() const { return overall_fill_value.rank(); }
DataType dtype() const { return overall_fill_value.dtype(); }
Index GetNumInBoundsElements(BoxView<> domain) const;
SharedArrayView<const void> GetFillValueForDomain(BoxView<> domain) const;
Result<NDIterable::Ptr> GetReadNDIterable(SharedArrayView<const void> array,
BoxView<> domain,
IndexTransform<> chunk_transform,
Arena* arena) const;
size_t EstimateReadStateSizeInBytes(bool valid,
span<const Index> shape) const {
if (!valid) return 0;
return ProductOfExtents(shape) * dtype()->size;
}
};
struct WritebackData {
SharedArrayView<const void> array;
bool must_store;
bool may_retain_reference_to_array_indefinitely;
};
struct MaskedArray {
explicit MaskedArray(DimensionIndex rank);
size_t EstimateSizeInBytes(const Spec& spec, span<const Index> shape) const;
SharedArray<void> array;
MaskData mask;
enum ArrayCapabilities {
kMutableArray,
kImmutableAndCanRetainIndefinitely,
kImmutableAndCanRetainUntilCommit,
};
ArrayCapabilities array_capabilities;
SharedArrayView<const void> shared_array_view(const Spec& spec) {
return array;
}
Result<TransformedSharedArray<void>> GetWritableTransformedArray(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform);
Result<NDIterable::Ptr> BeginWrite(const Spec& spec, BoxView<> domain,
IndexTransform<> chunk_transform,
Arena* arena);
void EndWrite(const Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform, Arena* arena);
void WriteFillValue(const Spec& spec, BoxView<> domain);
bool IsFullyOverwritten(const Spec& spec, BoxView<> domain) const {
return mask.num_masked_elements >= spec.GetNumInBoundsElements(domain);
}
bool IsUnmodified() const { return mask.num_masked_elements == 0; }
void Clear();
WritebackData GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
bool read_state_already_integrated = false);
private:
friend struct AsyncWriteArray;
void EnsureWritable(const Spec& spec);
};
MaskedArray write_state;
void InvalidateReadState() { read_generation = StorageGeneration::Invalid(); }
StorageGeneration read_generation = StorageGeneration::Invalid();
Result<NDIterable::Ptr> GetReadNDIterable(
const Spec& spec, BoxView<> domain,
SharedArrayView<const void> read_array,
const StorageGeneration& read_generation,
IndexTransform<> chunk_transform, Arena* arena);
enum class WriteArraySourceCapabilities {
kCannotRetain,
kMutable,
kImmutableAndCanRetainIndefinitely,
kImmutableAndCanRetainUntilCommit,
};
Result<NDIterable::Ptr> BeginWrite(const Spec& spec, BoxView<> domain,
IndexTransform<> chunk_transform,
Arena* arena);
void EndWrite(const Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform, bool success,
Arena* arena);
absl::Status WriteArray(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
absl::FunctionRef<Result<std::pair<TransformedSharedArray<const void>,
WriteArraySourceCapabilities>>()>
get_source_array);
WritebackData GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
const StorageGeneration& read_generation);
};
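// Typical write/writeback flow (illustrative sketch based on the TestWrite
// helper in the unit test below; `spec`, `domain`, `source_array`,
// `read_array`, and `read_gen` are placeholders):
//
//   AsyncWriteArray write_array(/*rank=*/2);
//   Arena arena;
//   auto transform = tensorstore::IdentityTransform(source_array.domain());
//   TENSORSTORE_ASSIGN_OR_RETURN(
//       auto dest_iterable,
//       write_array.BeginWrite(spec, domain, transform, &arena));
//   // ... copy from an NDIterable over `source_array` into `dest_iterable` ...
//   write_array.EndWrite(spec, domain, transform, /*success=*/true, &arena);
//   auto writeback =
//       write_array.GetArrayForWriteback(spec, domain, read_array, read_gen);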
}
}
#endif
#include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/masked_array.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
Index AsyncWriteArray::Spec::GetNumInBoundsElements(BoxView<> domain) const {
const DimensionIndex rank = this->rank();
assert(domain.rank() == rank);
Index product = 1;
const BoxView<> bounds = this->valid_data_bounds;
for (DimensionIndex i = 0; i < rank; ++i) {
product *= Intersect(bounds[i], domain[i]).size();
}
return product;
}
SharedArrayView<const void> AsyncWriteArray::Spec::GetFillValueForDomain(
BoxView<> domain) const {
const DimensionIndex rank = domain.rank();
assert(Contains(overall_fill_value.domain(), domain));
return SharedArrayView<const void>(
AddByteOffset(
overall_fill_value.element_pointer(),
IndexInnerProduct(rank, overall_fill_value.byte_strides().data(),
domain.origin().data())),
StridedLayoutView<>(rank, domain.shape().data(),
overall_fill_value.byte_strides().data()));
}
Result<NDIterable::Ptr> AsyncWriteArray::Spec::GetReadNDIterable(
SharedArrayView<const void> array, BoxView<> domain,
IndexTransform<> chunk_transform, Arena* arena) const {
if (!array.valid()) array = GetFillValueForDomain(domain);
assert(internal::RangesEqual(array.shape(), domain.shape()));
StridedLayoutView<dynamic_rank, offset_origin> data_layout(
domain, array.byte_strides());
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(
{AddByteOffset(std::move(array.element_pointer()),
-data_layout.origin_byte_offset()),
std::move(chunk_transform)},
arena);
}
AsyncWriteArray::MaskedArray::MaskedArray(DimensionIndex rank) : mask(rank) {}
void AsyncWriteArray::MaskedArray::WriteFillValue(const Spec& spec,
BoxView<> domain) {
array = {};
mask.Reset();
mask.num_masked_elements = domain.num_elements();
mask.region = domain;
}
AsyncWriteArray::WritebackData
AsyncWriteArray::MaskedArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
bool read_state_already_integrated) {
assert(domain.rank() == spec.rank());
const auto must_store = [&](ArrayView<const void> array) {
if (spec.store_if_equal_to_fill_value) return true;
return !AreArraysEqual(array, spec.GetFillValueForDomain(domain),
spec.fill_value_comparison_kind);
};
const auto get_writeback_from_array = [&] {
WritebackData writeback;
writeback.array = array;
writeback.must_store = must_store(writeback.array);
if (!writeback.must_store) {
array = {};
writeback.array = spec.GetFillValueForDomain(domain);
writeback.may_retain_reference_to_array_indefinitely = true;
} else {
writeback.may_retain_reference_to_array_indefinitely =
(array_capabilities <= kImmutableAndCanRetainIndefinitely);
}
return writeback;
};
if (!array.valid()) {
if (IsFullyOverwritten(spec, domain)) {
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (IsUnmodified()) {
WritebackData writeback;
writeback.must_store = read_array.valid() && must_store(read_array);
if (writeback.must_store) {
writeback.array = read_array;
} else {
writeback.array = spec.GetFillValueForDomain(domain);
}
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated && read_array.valid()) {
array_capabilities = kMutableArray;
array = tensorstore::MakeCopy(spec.GetFillValueForDomain(domain),
{c_order, include_repeated_elements});
RebaseMaskedArray(domain, ArrayView<const void>(read_array), array, mask);
return get_writeback_from_array();
}
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated &&
mask.num_masked_elements != domain.num_elements()) {
EnsureWritable(spec);
RebaseMaskedArray(
domain,
read_array.valid()
? ArrayView<const void>(read_array)
: ArrayView<const void>(spec.GetFillValueForDomain(domain)),
array, mask);
}
return get_writeback_from_array();
}
size_t AsyncWriteArray::MaskedArray::EstimateSizeInBytes(
const Spec& spec, span<const Index> shape) const {
size_t total = 0;
if (array.valid()) {
total += GetByteExtent(array);
}
if (mask.mask_array) {
const Index num_elements = ProductOfExtents(shape);
total += num_elements * sizeof(bool);
}
return total;
}
void AsyncWriteArray::MaskedArray::EnsureWritable(const Spec& spec) {
assert(array.valid());
auto new_array =
tensorstore::AllocateArray(array.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
CopyArray(array, new_array);
array = std::move(new_array);
array_capabilities = kMutableArray;
}
Result<TransformedSharedArray<void>>
AsyncWriteArray::MaskedArray::GetWritableTransformedArray(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform) {
if (!array.valid()) {
this->array =
tensorstore::AllocateArray(domain.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
array_capabilities = kMutableArray;
if (IsFullyOverwritten(spec, domain)) {
CopyArray(spec.GetFillValueForDomain(domain), this->array);
} else {
assert(IsUnmodified());
}
} else if (array_capabilities != kMutableArray) {
EnsureWritable(spec);
}
StridedLayoutView<dynamic_rank, offset_origin> data_layout{
domain, this->array.byte_strides()};
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return {std::in_place,
UnownedToShared(
AddByteOffset(ElementPointer<void>(this->array.element_pointer()),
-data_layout.origin_byte_offset())),
std::move(chunk_transform)};
}
Result<NDIterable::Ptr> AsyncWriteArray::MaskedArray::BeginWrite(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_array,
GetWritableTransformedArray(spec, domain, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(std::move(transformed_array), arena);
}
void AsyncWriteArray::MaskedArray::EndWrite(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
Arena* arena) {
WriteToMask(&mask, domain, chunk_transform, arena);
}
void AsyncWriteArray::MaskedArray::Clear() {
mask.Reset();
array = {};
}
AsyncWriteArray::AsyncWriteArray(DimensionIndex rank) : write_state(rank) {}
AsyncWriteArray::WritebackData AsyncWriteArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
const StorageGeneration& read_generation) {
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array, read_generation == this->read_generation);
if (write_state.array.valid()) this->read_generation = read_generation;
return writeback_data;
}
Result<NDIterable::Ptr> AsyncWriteArray::GetReadNDIterable(
const Spec& spec, BoxView<> domain, SharedArrayView<const void> read_array,
const StorageGeneration& read_generation, IndexTransform<> chunk_transform,
Arena* arena) {
if (!read_array.valid()) read_array = spec.GetFillValueForDomain(domain);
if (!write_state.IsUnmodified()) {
if (write_state.IsFullyOverwritten(spec, domain)) {
if (!write_state.array.valid()) {
read_array = spec.GetFillValueForDomain(domain);
}
} else if (this->read_generation != read_generation) {
assert(write_state.array.valid());
if (write_state.array_capabilities != MaskedArray::kMutableArray) {
write_state.EnsureWritable(spec);
}
RebaseMaskedArray(domain, read_array, write_state.array,
write_state.mask);
this->read_generation = read_generation;
}
if (write_state.array.valid()) {
read_array = write_state.array;
}
}
return spec.GetReadNDIterable(std::move(read_array), domain,
std::move(chunk_transform), arena);
}
namespace {
bool ZeroCopyToWriteArray(
const AsyncWriteArray::Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform,
TransformedSharedArray<const void> source_array,
AsyncWriteArray::WriteArraySourceCapabilities source_capabilities,
AsyncWriteArray::MaskedArray& write_state) {
assert(source_capabilities !=
AsyncWriteArray::WriteArraySourceCapabilities::kCannotRetain);
const DimensionIndex dest_rank = domain.rank();
assert(spec.rank() == dest_rank);
assert(chunk_transform.output_rank() == dest_rank);
IndexTransformView<> source_transform = source_array.transform();
const DimensionIndex input_rank = chunk_transform.input_rank();
assert(source_transform.input_rank() == input_rank);
assert(source_transform.domain().box() == chunk_transform.domain().box());
Index new_byte_strides[kMaxRank];
DimensionIndex dest_dim_for_input_dim[kMaxRank];
std::fill_n(dest_dim_for_input_dim, input_rank, DimensionIndex(-1));
std::fill_n(new_byte_strides, dest_rank, Index(0));
for (DimensionIndex dest_dim = 0; dest_dim < dest_rank; ++dest_dim) {
if (domain.shape()[dest_dim] == 1) continue;
auto map = chunk_transform.output_index_map(dest_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) {
continue;
}
[[maybe_unused]] DimensionIndex prev_dest_dim =
std::exchange(dest_dim_for_input_dim[map.input_dimension()], dest_dim);
assert(prev_dest_dim == -1);
}
const DimensionIndex source_output_rank = source_transform.output_rank();
Index source_offset = 0;
for (DimensionIndex source_output_dim = 0;
source_output_dim < source_output_rank; ++source_output_dim) {
auto map = source_transform.output_index_map(source_output_dim);
source_offset =
internal::wrap_on_overflow::Add(source_offset, map.offset());
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const DimensionIndex dest_dim = dest_dim_for_input_dim[input_dim];
const Index source_stride = map.stride();
if (dest_dim == -1) {
assert(source_transform.input_shape()[input_dim] == 1);
const Index source_origin =
source_transform.input_origin()[input_dim];
source_offset = internal::wrap_on_overflow::Add(
source_offset, internal::wrap_on_overflow::Multiply(
source_origin, source_stride));
break;
}
const auto dest_map = chunk_transform.output_index_map(dest_dim);
const Index dest_stride = dest_map.stride(); | #include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <random>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::ReferencesSameDataAs;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::AsyncWriteArray;
using MaskedArray = AsyncWriteArray::MaskedArray;
using Spec = AsyncWriteArray::Spec;
tensorstore::SharedArray<void> CopyNDIterable(
tensorstore::internal::NDIterable::Ptr source_iterable,
span<const Index> shape, Arena* arena) {
auto dest_array = tensorstore::AllocateArray(shape, tensorstore::c_order,
tensorstore::default_init,
source_iterable->dtype());
auto dest_iterable =
tensorstore::internal::GetArrayNDIterable(dest_array, arena);
tensorstore::internal::NDIterableCopier copier(*source_iterable,
*dest_iterable, shape, arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
return dest_array;
}
template <typename Target>
void TestWrite(Target* target, const Spec& spec, BoxView<> domain,
tensorstore::SharedOffsetArrayView<const void> source_array) {
Arena arena;
auto transform = tensorstore::IdentityTransform(source_array.domain());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto dest_iterable, target->BeginWrite(spec, domain, transform, &arena));
auto source_iterable =
tensorstore::internal::GetArrayNDIterable(source_array, &arena);
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *dest_iterable, source_array.shape(), &arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
if constexpr (std::is_same_v<Target, AsyncWriteArray>) {
target->EndWrite(spec, domain, transform, true, &arena);
} else {
target->EndWrite(spec, domain, transform, &arena);
}
}
TEST(SpecTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
EXPECT_EQ(6, spec.GetNumInBoundsElements(BoxView<>({0, 0}, {2, 3})));
EXPECT_EQ(3, spec.GetNumInBoundsElements(BoxView<>({-2, 0}, {2, 3})));
EXPECT_EQ(2, spec.rank());
EXPECT_EQ(tensorstore::dtype_v<int32_t>, spec.dtype());
EXPECT_EQ(0, spec.EstimateReadStateSizeInBytes(false,
span<const Index>({2, 3})));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
spec.EstimateReadStateSizeInBytes(true,
span<const Index>({2, 3})));
{
auto read_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
spec.GetReadNDIterable(
read_array, BoxView<>({2, 6}, {2, 3}),
tensorstore::IdentityTransform(tensorstore::Box<>({2, 6}, {2, 2})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{7, 8}, {10, 11}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 2}), &arena));
}
}
TEST(MaskedArrayTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
MaskedArray write_state(2);
Box<> domain({0, 0}, {2, 3});
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, fill_value_copy,
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, domain,
tensorstore::AllocateArray<int32_t>(
tensorstore::BoxView<>({1, 1}, {0, 0})));
EXPECT_TRUE(write_state.array.valid());
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
write_state.EstimateSizeInBytes(spec, domain.shape()));
std::fill_n(static_cast<int32_t*>(write_state.array.data()),
domain.num_elements(), 0);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{0, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
EXPECT_EQ(MakeArray<int32_t>({{9, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_EQ(MakeArray<bool>({{1, 0, 0}, {0, 1, 1}}),
tensorstore::Array(write_state.mask.mask_array.get(), {2, 3}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * (sizeof(int32_t) + sizeof(bool)),
write_state.EstimateSizeInBytes(spec, domain.shape()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
true);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 12, 13}, {14, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}, {9}}));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{10, 10, 10}}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{10, 10, 10}, {9, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain, fill_value_copy);
EXPECT_TRUE(write_state.array.valid());
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
write_state.Clear();
EXPECT_TRUE(write_state.IsUnmodified());
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
write_state.WriteFillValue(spec, domain);
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{21, 22, 23}, {31, 7, 8}}),
write_state.shared_array_view(spec));
}
TEST(MaskedArrayTest, PartialChunk) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{-2, 0}, {2, 3}};
MaskedArray write_state(2);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({-1, 0}, {{7, 8, 9}}));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
}
TEST(MaskedArrayTest, StoreIfEqualToFillValue) {
auto overall_fill_value = MakeScalarArray<int32_t>(42);
tensorstore::Box<> component_bounds;
Spec spec{overall_fill_value, component_bounds};
spec.store_if_equal_to_fill_value = true;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {}, tensorstore::MakeScalarArray<int32_t>(42));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
auto read_array = MakeScalarArray<int32_t>(50);
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, read_array,
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
}
TEST(MaskedArrayTest, CompareFillValueIdenticallyEqual) {
auto fill_value =
MakeScalarArray<float>(std::numeric_limits<float>::quiet_NaN());
tensorstore::Box<> component_bounds;
Spec spec{fill_value, component_bounds};
spec.fill_value_comparison_kind =
tensorstore::EqualityComparisonKind::identical;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(AreArraysIdenticallyEqual(
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()),
writeback_data.array));
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(
AreArraysIdenticallyEqual(tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()),
writeback_data.array));
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(fill_value.data(), writeback_data.array.data());
}
}
TEST(AsyncWriteArrayTest, Basic) {
AsyncWriteArray async_write_array(2);
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0, 0}, {2, 3}};
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 1}, {2, 2})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{22, 23}, {32, 33}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 2}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("b"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_EQ(StorageGeneration::Invalid(), async_write_array.read_generation);
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{8}}));
{
auto* data_ptr = async_write_array.write_state.array.data();
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{7}}));
EXPECT_EQ(data_ptr, async_write_array.write_state.array.data());
}
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 3}), &arena));
}
tensorstore::SharedArray<const void> prev_writeback_array;
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("a"));
EXPECT_TRUE(writeback_data.must_store);
prev_writeback_array = writeback_data.array;
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
writeback_data.array);
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{7, 12, 13}, {14, 15, 16}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("c"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {24, 25, 26}}),
writeback_data.array);
EXPECT_EQ(StorageGeneration::FromString("c"),
async_write_array.read_generation);
EXPECT_NE(prev_writeback_array, writeback_data.array);
}
async_write_array.write_state.WriteFillValue(spec, domain);
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(
fill_value_copy,
CopyNDIterable(std::move(iterable), span<const Index>({2, 3}), &arena));
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(
MakeArray<int32_t>({{9, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable), span<const Index>({2, 3}), &arena));
}
}
TEST(AsyncWriteArrayTest, Issue144) {
AsyncWriteArray async_write_array(1);
auto overall_fill_value = MakeArray<int32_t>({0, 0});
tensorstore::Box<> component_bounds(1);
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0}, {2}};
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1}, {0}));
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("c"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
for (int i = 0; i < 2; ++i) {
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("d"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({2, 2}),
StorageGeneration::FromString("e"));
EXPECT_EQ(MakeArray<int32_t>({2, 0}), writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_TRUE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({0, 2}),
StorageGeneration::FromString("f"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
}
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
using ArrayCapabilities = AsyncWriteArray::MaskedArray::ArrayCapabilities;
void TestWriteArraySuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy, tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
SCOPED_TRACE(tensorstore::StrCat("chunk_transform=", chunk_transform));
AsyncWriteArray async_write_array(chunk_transform.output_rank());
tensorstore::Box<> output_range(chunk_transform.output_rank());
ASSERT_THAT(tensorstore::GetOutputRange(chunk_transform, output_range),
::testing::Optional(true));
auto origin = output_range.origin();
SCOPED_TRACE(tensorstore::StrCat("origin=", origin));
auto fill_value =
tensorstore::AllocateArray(output_range, tensorstore::c_order,
tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
size_t orig_use_count = source_array.element_pointer().pointer().use_count();
TENSORSTORE_ASSERT_OK(async_write_array.WriteArray(
spec, output_range, chunk_transform,
[&] { return std::pair{source_array, source_capabilities}; }));
auto validate_zero_copy = [&](const auto& target_array,
size_t orig_use_count) {
EXPECT_EQ((zero_copy ? orig_use_count + 1 : orig_use_count),
source_array.element_pointer().pointer().use_count());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_target_array,
target_array |
tensorstore::AllDims().TranslateTo(output_range.origin()) |
chunk_transform | tensorstore::TryConvertToArray());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_source_array,
source_array | tensorstore::TryConvertToArray());
EXPECT_THAT(
materialized_target_array,
::testing::Conditional(
zero_copy, ReferencesSameDataAs(materialized_source_array),
::testing::Not(ReferencesSameDataAs(materialized_source_array))));
};
{
SCOPED_TRACE(
"Checking async_write_array.write_state.array before calling "
"GetArrayForWriteback");
validate_zero_copy(async_write_array.write_state.array, orig_use_count);
}
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
{
SCOPED_TRACE("Checking writeback_data");
orig_use_count = source_array.element_pointer().pointer().use_count();
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, output_range, {},
StorageGeneration::Invalid());
validate_zero_copy(writeback_data.array, orig_use_count);
EXPECT_EQ(may_retain_writeback,
writeback_data.may_retain_reference_to_array_indefinitely);
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
}
}
absl::Status TestWriteArrayError(
WriteArraySourceCapabilities source_capabilities, tensorstore::Box<> box,
tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
AsyncWriteArray async_write_array(chunk_transform.output_rank());
auto fill_value = tensorstore::AllocateArray(
box, tensorstore::c_order, tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
return async_write_array.WriteArray(spec, box, chunk_transform, [&] {
return std::pair{source_array, source_capabilities};
});
}
void TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy) {
auto source_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
auto chunk_transform = tensorstore::IdentityTransform(source_array.shape());
TestWriteArraySuccess(source_capabilities, expected_array_capabilities,
may_retain_writeback, zero_copy, chunk_transform,
source_array);
}
TEST(WriteArrayIdentityTransformSuccessTest, kCannotRetain) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kCannotRetain,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
false);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainIndefinitely) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainIndefinitely,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainIndefinitely,
true,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainUntilCommit) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainUntilCommit,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainUntilCommit,
false,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest, kMutable) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true);
}
TEST(WriteArrayNonIdentityTransformSuccess, kMutable) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_ASYNC_WRITE_ARRAY")};
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
constexpr size_t kNumIterations = 10;
for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
tensorstore::IndexTransform<> source_transform;
{
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
p;
p.max_stride = 2;
source_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IndexDomain<>(base_source_array.domain()), p);
}
SCOPED_TRACE(tensorstore::StrCat("source_transform=", source_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto source_array,
base_source_array | source_transform);
auto chunk_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, source_array.domain());
TestWriteArraySuccess(WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true, chunk_transform, source_array);
}
}
TEST(WriteArrayErrorTest, SourceArrayIndexArrayMap) {
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 1 |
639 | cpp | google/tensorstore | env | tensorstore/internal/env.cc | tensorstore/internal/env_test.cc | #ifndef TENSORSTORE_INTERNAL_ENV_H_
#define TENSORSTORE_INTERNAL_ENV_H_
#include <optional>
#include <string>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/flags/flag.h"
#include "absl/flags/marshalling.h"
#include "absl/log/absl_log.h"
#include "absl/strings/numbers.h"
namespace tensorstore {
namespace internal {
absl::flat_hash_map<std::string, std::string> GetEnvironmentMap();
std::optional<std::string> GetEnv(char const* variable);
void SetEnv(const char* variable, const char* value);
void UnsetEnv(const char* variable);
template <typename T>
std::optional<T> GetEnvValue(const char* variable) {
auto env = internal::GetEnv(variable);
if (!env) return std::nullopt;
if constexpr (std::is_same_v<std::string, T>) {
return env;
} else if constexpr (std::is_same_v<bool, T>) {
T n;
if (absl::SimpleAtob(*env, &n)) return n;
} else if constexpr (std::is_same_v<float, T>) {
T n;
if (absl::SimpleAtof(*env, &n)) return n;
} else if constexpr (std::is_same_v<double, T>) {
T n;
if (absl::SimpleAtod(*env, &n)) return n;
} else if constexpr (std::is_integral_v<T>) {
T n;
if (absl::SimpleAtoi(*env, &n)) return n;
} else {
std::string err;
T value;
if (absl::ParseFlag(*env, &value, &err)) {
return value;
}
ABSL_LOG(INFO) << "Failed to parse " << variable << "=" << *env
<< " as a value: " << err;
return std::nullopt;
}
ABSL_LOG(INFO) << "Failed to parse" << variable << " as a value: " << *env;
return std::nullopt;
}
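// Example (illustrative only; the environment variable name is hypothetical):
//
//   // Returns std::nullopt if the variable is unset or cannot be parsed.
//   std::optional<int> limit =
//       internal::GetEnvValue<int>("TENSORSTORE_EXAMPLE_LIMIT");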
template <typename T>
ABSL_MUST_USE_RESULT std::optional<T> GetFlagOrEnvValue(
absl::Flag<std::optional<T>>& flag, const char* variable) {
if (auto val = absl::GetFlag(flag); val.has_value()) return val;
if (auto env = internal::GetEnvValue<T>(variable); env.has_value()) {
return env;
}
return std::nullopt;
}
}
}
#endif
#include "tensorstore/internal/env.h"
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <processenv.h>
#endif
#include <stddef.h>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#ifndef _WIN32
extern char** environ;
#endif
namespace tensorstore {
namespace internal {
absl::flat_hash_map<std::string, std::string> GetEnvironmentMap() {
absl::flat_hash_map<std::string, std::string> result;
#if _WIN32
char* envblock = GetEnvironmentStrings();
for (auto p = envblock; *p; ) {
if (const char* eq = strchr(p, '=')) {
result[std::string(p, eq - p)] = eq + 1;
}
p += strlen(p) + 1;
}
FreeEnvironmentStrings(envblock);
#else
for (auto p = environ; *p; ++p) {
if (const char* eq = strchr(*p, '=')) {
result[std::string(*p, eq - *p)] = eq + 1;
}
}
#endif
return result;
}
std::optional<std::string> GetEnv(char const* variable) {
#if _WIN32
char* buffer;
size_t size;
_dupenv_s(&buffer, &size, variable);
std::unique_ptr<char, decltype(&free)> release(buffer, &free);
#else
char* buffer = std::getenv(variable);
#endif
if (buffer == nullptr) {
return std::optional<std::string>();
}
return std::optional<std::string>(std::string{buffer});
}
void SetEnv(const char* variable, const char* value) {
#if _WIN32
::_putenv_s(variable, value);
#else
::setenv(variable, value, 1);
#endif
}
void UnsetEnv(const char* variable) {
#if _WIN32
::_putenv_s(variable, "");
#else
::unsetenv(variable);
#endif
}
}
} | #include "tensorstore/internal/env.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::GetEnvironmentMap;
using ::tensorstore::internal::GetEnvValue;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
TEST(GetEnvTest, Basic) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
{
auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
EXPECT_TRUE(var);
EXPECT_EQ("test env var", *var);
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
TEST(GetEnvTest, GetEnvironmentMap) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
auto allenv = GetEnvironmentMap();
EXPECT_FALSE(allenv.empty());
EXPECT_THAT(allenv.count("TENSORSTORE_TEST_ENV_VAR"), 1);
}
TEST(GetEnvTest, ParseBool) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "trUe");
{
EXPECT_THAT(GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR"),
testing::Optional(true));
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
TEST(GetEnvTest, ParseInt) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "123");
{
EXPECT_THAT(GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR"),
testing::Optional(123));
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
} |
640 | cpp | google/tensorstore | retry | tensorstore/internal/retry.cc | tensorstore/internal/retry_test.cc | #ifndef TENSORSTORE_INTERNAL_RETRY_H_
#define TENSORSTORE_INTERNAL_RETRY_H_
#include "absl/time/time.h"
namespace tensorstore {
namespace internal {
absl::Duration BackoffForAttempt(
int attempt,
absl::Duration initial_delay,
absl::Duration max_delay,
absl::Duration jitter
);
}
}
#endif
#include "tensorstore/internal/retry.h"
#include <stdint.h>
#include <cassert>
#include "absl/random/random.h"
#include "absl/time/time.h"
namespace tensorstore {
namespace internal {
absl::Duration BackoffForAttempt(int attempt, absl::Duration initial_delay,
absl::Duration max_delay,
absl::Duration jitter) {
assert(initial_delay > absl::ZeroDuration());
assert(max_delay >= initial_delay);
assert(attempt >= 0);
int64_t multiple = int64_t{1} << (attempt > 62 ? 62 : attempt);
auto delay = initial_delay * multiple;
int64_t jitter_us = absl::ToInt64Microseconds(jitter);
if (jitter_us > 0) {
delay += absl::Microseconds(absl::Uniform(
absl::IntervalClosed, absl::InsecureBitGen{}, 0, jitter_us));
}
if (delay > max_delay) delay = max_delay;
return delay;
}
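// Example retry loop (illustrative sketch only; `TryOperation` and
// `kMaxRetries` are hypothetical, and the delays are arbitrary):
//
//   for (int attempt = 0; attempt < kMaxRetries; ++attempt) {
//     if (TryOperation()) break;
//     absl::SleepFor(BackoffForAttempt(attempt,
//                                      /*initial_delay=*/absl::Milliseconds(1),
//                                      /*max_delay=*/absl::Seconds(1),
//                                      /*jitter=*/absl::Milliseconds(1)));
//   }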
}
} | #include "tensorstore/internal/retry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
namespace {
using ::tensorstore::internal::BackoffForAttempt;
TEST(RetryTest, BackoffForAttempt) {
EXPECT_EQ(absl::Microseconds(1),
BackoffForAttempt(0, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(absl::Microseconds(2),
BackoffForAttempt(1, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(absl::Microseconds(4),
BackoffForAttempt(2, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(
absl::Microseconds(100),
BackoffForAttempt(66, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_THAT(absl::ToInt64Microseconds(BackoffForAttempt(
2, absl::Microseconds(1), absl::Microseconds(200),
absl::Microseconds(100))),
::testing::AllOf(::testing::Ge(2), testing::Le(104)));
}
} |
641 | cpp | google/tensorstore | nditerable_data_type_conversion | tensorstore/internal/nditerable_data_type_conversion.cc | tensorstore/internal/nditerable_data_type_conversion_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_DATA_TYPE_CONVERSION_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_DATA_TYPE_CONVERSION_H_
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/internal/nditerable.h"
namespace tensorstore {
namespace internal {
NDIterable::Ptr GetConvertedInputNDIterable(
NDIterable::Ptr iterable, DataType target_type,
const DataTypeConversionLookupResult& conversion);
NDIterable::Ptr GetConvertedOutputNDIterable(
NDIterable::Ptr iterable, DataType source_type,
const DataTypeConversionLookupResult& conversion);
}
}
#endif
#include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <cassert>
#include <memory>
#include <utility>
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
namespace tensorstore {
namespace internal {
namespace {
template <typename Derived, typename BasePointer = NDIterable::Ptr>
class NDIterableAdapter : public NDIterable::Base<Derived> {
public:
NDIterableAdapter(BasePointer base) : base_(std::move(base)) {}
const BasePointer& base() const { return base_; }
BasePointer& base() { return base_; }
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return base_->GetDimensionOrder(dim_i, dim_j);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
base_->UpdateDirectionPrefs(prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return base_->CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j);
}
NDIterable::IterationBufferConstraint GetIterationBufferConstraint(
NDIterable::IterationLayoutView layout) const override {
return base_->GetIterationBufferConstraint(layout);
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return base_->GetWorkingMemoryBytesPerElement(layout, buffer_kind);
}
DataType dtype() const override { return base_->dtype(); }
ArenaAllocator<> get_allocator() const override {
return base_->get_allocator();
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return base_->GetIterator(layout);
}
private:
BasePointer base_;
};
class ReinterpretCastNDIterable
: public NDIterableAdapter<ReinterpretCastNDIterable> {
public:
ReinterpretCastNDIterable(NDIterable::Ptr base, DataType new_dtype,
ArenaAllocator<> allocator)
: NDIterableAdapter<ReinterpretCastNDIterable>(std::move(base)),
dtype_(new_dtype) {}
DataType dtype() const override { return dtype_; }
private:
DataType dtype_;
};
}
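// The two adapters below wrap an NDIterable so that reads (input) or writes
// (output) are converted between its element type and the requested type.
// Identity conversions return the iterable unchanged, bit-compatible types use
// the cheap reinterpret-cast wrapper above, and all other conversions apply
// the elementwise closure from the conversion lookup result.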
NDIterable::Ptr GetConvertedInputNDIterable(
NDIterable::Ptr iterable, DataType target_type,
const DataTypeConversionLookupResult& conversion) {
assert(DataTypeConversionFlags::kSupported ==
(conversion.flags & DataTypeConversionFlags::kSupported));
if (DataTypeConversionFlags::kIdentity ==
(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (DataTypeConversionFlags::kCanReinterpretCast ==
(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), target_type);
}
return GetElementwiseInputTransformNDIterable({{std::move(iterable)}},
target_type, conversion.closure,
allocator.arena());
}
NDIterable::Ptr GetConvertedOutputNDIterable(
NDIterable::Ptr iterable, DataType source_type,
const DataTypeConversionLookupResult& conversion) {
assert(!!(conversion.flags & DataTypeConversionFlags::kSupported));
if (!!(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (!!(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), source_type);
}
return GetElementwiseOutputTransformNDIterable(
std::move(iterable), source_type, conversion.closure, allocator.arena());
}
}
} | #include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <stdint.h>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::GetDataTypeConverter;
using ::testing::Pair;
using ::tensorstore::dtypes::json_t;
using ::tensorstore::dtypes::string_t;
}
class NDIterableDataTypeConversionTest : public ::testing::TestWithParam<bool> {
protected:
tensorstore::internal::Arena arena;
std::pair<absl::Status, SharedArray<const void>> Convert(
TransformedArray<Shared<const void>> source, DataType target_dtype) {
tensorstore::internal::Arena arena;
auto target =
tensorstore::AllocateArray(source.shape(), tensorstore::c_order,
tensorstore::value_init, target_dtype);
auto source_iterable =
tensorstore::internal::GetTransformedArrayNDIterable(source, &arena)
.value();
auto target_iterable =
tensorstore::internal::GetArrayNDIterable(target, &arena);
if (GetParam()) {
source_iterable = GetConvertedInputNDIterable(
std::move(source_iterable), target_dtype,
GetDataTypeConverter(source.dtype(), target_dtype));
} else {
target_iterable = GetConvertedOutputNDIterable(
std::move(target_iterable), source.dtype(),
GetDataTypeConverter(source.dtype(), target_dtype));
}
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *target_iterable, target.shape(),
tensorstore::c_order, &arena);
absl::Status status = copier.Copy();
return std::make_pair(status, target);
}
};
INSTANTIATE_TEST_SUITE_P(GetConvertedInputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(GetConvertedOutputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(false));
TEST_P(NDIterableDataTypeConversionTest, Int32ToInt32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<int32_t>),
Pair(absl::OkStatus(), MakeArray<int32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToUint32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<uint32_t>),
Pair(absl::OkStatus(), MakeArray<uint32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToString) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<string_t>),
Pair(absl::OkStatus(), MakeArray<string_t>({"1", "2", "3"})));
}
TEST_P(NDIterableDataTypeConversionTest, JsonToString) {
EXPECT_THAT(
Convert(MakeArray<json_t>({"hello", "world", 3}), dtype_v<string_t>),
Pair(MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: 3"),
MakeArray<string_t>({"hello", "world", ""})));
} |
642 | cpp | google/tensorstore | nditerable_elementwise_input_transform | tensorstore/internal/nditerable_elementwise_input_transform.cc | tensorstore/internal/nditerable_elementwise_input_transform_test.cc | #ifndef TENSORSTORE_INTERNAL_NDITERABLE_ELEMENTWISE_TRANSFORM_H_
#define TENSORSTORE_INTERNAL_NDITERABLE_ELEMENTWISE_TRANSFORM_H_
#include <array>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
template <size_t Arity>
NDIterable::Ptr GetElementwiseInputTransformNDIterable(
std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype,
ElementwiseClosure<Arity, void*> closure, Arena* arena);
}
}
#endif
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <stddef.h>
#include <array>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
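// NDIterator adapter that reads blocks from `Arity` input iterators and
// applies the elementwise closure to produce blocks of the output type.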
template <size_t Arity>
class ElementwiseInputTransformNDIterator
: public NDIterator::Base<ElementwiseInputTransformNDIterator<Arity>> {
public:
explicit ElementwiseInputTransformNDIterator(
span<const NDIterable::Ptr, Arity> inputs,
ElementwiseClosure<Arity + 1, void*> closure,
NDIterable::IterationBufferKindLayoutView layout,
ArenaAllocator<> allocator)
: inputs_(inputs, layout, allocator),
context_(closure.context),
elementwise_function_((*closure.function)[layout.buffer_kind]) {}
ArenaAllocator<> get_allocator() const override {
return inputs_.get_allocator();
}
bool GetBlock(span<const Index> indices, IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
return inputs_.GetBlock(indices, block_shape, status) &&
InvokeElementwiseFunction<Arity>(
elementwise_function_, context_, block_shape,
inputs_.block_pointers(), *pointer, static_cast<void*>(status));
}
private:
NDIteratorsWithManagedBuffers<Arity> inputs_;
void* context_;
SpecializedElementwiseFunctionPointer<Arity + 1, void*> elementwise_function_;
};
template <size_t Arity>
class ElementwiseInputTransformNDIterable
: public NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, Arity>,
NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>> {
using Base = NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, Arity>,
NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>>;
public:
ElementwiseInputTransformNDIterable(
std::array<NDIterable::Ptr, Arity> input_iterables, DataType output_dtype,
ElementwiseClosure<Arity + 1, void*> closure, ArenaAllocator<> allocator)
: Base{std::move(input_iterables)},
output_dtype_(output_dtype),
closure_(closure),
allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
DataType dtype() const override { return output_dtype_; }
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseInputTransformNDIterator<Arity>>(allocator_, this->iterables,
closure_, layout);
}
private:
std::array<NDIterable::Ptr, Arity> inputs_;
DataType output_dtype_;
ElementwiseClosure<Arity + 1, void*> closure_;
ArenaAllocator<> allocator_;
};
}
template <size_t Arity>
NDIterable::Ptr GetElementwiseInputTransformNDIterable(
std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype,
ElementwiseClosure<Arity, void*> closure, Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseInputTransformNDIterable<Arity - 1>>(
ArenaAllocator<>(arena), std::move(inputs), output_dtype, closure);
}
#define TENSORSTORE_INTERNAL_DO_INSTANTIATE(Arity) \
template NDIterable::Ptr GetElementwiseInputTransformNDIterable<Arity>( \
std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype, \
      ElementwiseClosure<Arity, void*> closure, Arena * arena);
TENSORSTORE_INTERNAL_DO_INSTANTIATE(1)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(2)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(3)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(4)
#undef TENSORSTORE_INTERNAL_DO_INSTANTIATE
}
} | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
template <typename Func, typename DestArray, typename... SourceArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
DestArray dest_array, SourceArray... source_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<sizeof...(SourceArray) + 1, void*>
closure = tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element..., typename DestArray::Element),
void*>::Closure(&func);
auto iterable = tensorstore::internal::GetElementwiseInputTransformNDIterable(
{{tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value()...}},
tensorstore::dtype_v<typename DestArray::Element>, closure, &arena);
return NDIterableCopier(*iterable,
*tensorstore::internal::GetTransformedArrayNDIterable(
dest_array, &arena)
.value(),
dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseInputTransformTest, Nullary) {
auto dest = tensorstore::AllocateArray<double>({2, 3});
TENSORSTORE_EXPECT_OK(TestCopy([](double* dest, void* arg) { *dest = 42.0; },
{}, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{42.0, 42.0, 42.0}, {42.0, 42.0, 42.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Unary) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* arg) { *dest = -*source; },
{}, dest, source));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Binary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(TestCopy([](const int* a, const int* b, double* dest,
void* arg) { *dest = 2.0 * *a + *b; },
{}, dest, a, b));
EXPECT_EQ(
tensorstore::MakeArray<double>({{12.0, 16.0, 20.0}, {24.0, 28.0, 32.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Ternary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto c = tensorstore::MakeArray<double>({{1, -1, 1}, {-1, -1, 1}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(
TestCopy([](const int* a, const int* b, const double* c, double* dest,
void* arg) { *dest = *a + *b * *c; },
{}, dest, a, b, c));
EXPECT_EQ(
tensorstore::MakeArray<double>({{1 + 10 * 1, 2 + 12 * -1, 3 + 14 * 1},
{4 + 16 * -1, 5 + 18 * -1, 6 + 20 * 1}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, dest, source),
MatchesStatus(absl::StatusCode::kUnknown, "zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} |
643 | cpp | google/tensorstore | parse_json_matches | tensorstore/internal/parse_json_matches.cc | tensorstore/internal/parse_json_matches_test.cc | #ifndef TENSORSTORE_INTERNAL_PARSE_JSON_MATCHES_H_
#define TENSORSTORE_INTERNAL_PARSE_JSON_MATCHES_H_
#include <string>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace tensorstore {
namespace internal {
::testing::Matcher<std::string> ParseJsonMatches(
::testing::Matcher<::nlohmann::json> json_matcher);
::testing::Matcher<std::string> ParseJsonMatches(::nlohmann::json json);
}
}
#endif
#include "tensorstore/internal/parse_json_matches.h"
#include <ostream>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
namespace tensorstore {
namespace internal {
namespace {
class Matcher : public ::testing::MatcherInterface<std::string> {
public:
Matcher(::testing::Matcher<::nlohmann::json> json_matcher)
: json_matcher_(std::move(json_matcher)) {}
bool MatchAndExplain(
std::string value,
::testing::MatchResultListener* listener) const override {
return json_matcher_.MatchAndExplain(
tensorstore::internal::ParseJson(value), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "when parsed as JSON ";
json_matcher_.DescribeTo(os);
}
private:
::testing::Matcher<::nlohmann::json> json_matcher_;
};
}
::testing::Matcher<std::string> ParseJsonMatches(
::testing::Matcher<::nlohmann::json> json_matcher) {
return ::testing::MakeMatcher(new Matcher(std::move(json_matcher)));
}
::testing::Matcher<std::string> ParseJsonMatches(::nlohmann::json json) {
return ParseJsonMatches(MatchesJson(json));
}
}
} | #include "tensorstore/internal/parse_json_matches.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::internal::ParseJsonMatches;
TEST(ParseJsonMatchesTest, Describe) {
std::ostringstream ss;
ParseJsonMatches(::nlohmann::json(true)).DescribeTo(&ss);
EXPECT_EQ("when parsed as JSON matches json true", ss.str());
}
TEST(ParseJsonMatchesTest, Explain) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(ParseJsonMatches(::nlohmann::json(true)),
"false", &listener);
EXPECT_EQ(
"where the difference is:\n"
"[\n"
" {\n"
" \"op\": \"replace\",\n"
" \"path\": \"\",\n"
" \"value\": false\n"
" }\n"
"]",
listener.str());
}
TEST(ParseJsonMatchesTest, Matches) {
EXPECT_THAT("{\"a\":\"b\"}", ParseJsonMatches(::nlohmann::json{{"a", "b"}}));
EXPECT_THAT("{\"a\":\"b\"}",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("invalid",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("{\"a\":\"b\"}",
ParseJsonMatches(::testing::Not(::nlohmann::json{{"a", "c"}})));
}
} |
644 | cpp | google/tensorstore | json_pointer | tensorstore/internal/json_pointer.cc | tensorstore/internal/json_pointer_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_POINTER_H_
#define TENSORSTORE_INTERNAL_JSON_POINTER_H_
#include <string_view>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace json_pointer {
absl::Status Validate(std::string_view s);
enum CompareResult {
kLessThan = -2,
kContains = -1,
kEqual = 0,
kContainedIn = 1,
kGreaterThan = 2,
};
CompareResult Compare(std::string_view a, std::string_view b);
std::string EncodeReferenceToken(std::string_view token);
enum DereferenceMode {
kMustExist,
kCreate,
kSimulateCreate,
kDelete,
};
Result<::nlohmann::json*> Dereference(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode);
Result<const ::nlohmann::json*> Dereference(const ::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode = kMustExist);
absl::Status Replace(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
::nlohmann::json new_sub_value);
}
}
#endif
#include "tensorstore/internal/json_pointer.h"
#include <algorithm>
#include <string_view>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace json_pointer {
absl::Status Validate(std::string_view s) {
if (s.empty()) {
return absl::OkStatus();
}
const auto parse_error = [&](const auto&... message) {
return absl::InvalidArgumentError(
tensorstore::StrCat(message..., ": ", tensorstore::QuoteString(s)));
};
if (s[0] != '/') {
return parse_error("JSON Pointer does not start with '/'");
}
for (size_t i = 1; i < s.size(); ++i) {
if (s[i] != '~') continue;
if (i + 1 == s.size() || (s[i + 1] != '0' && s[i + 1] != '1')) {
return parse_error(
"JSON Pointer requires '~' to be followed by '0' or '1'");
}
++i;
}
return absl::OkStatus();
}
namespace {
unsigned char DecodeEscape(char x) {
assert(x == '0' || x == '1');
return x == '0' ? '~' : '/';
}
void DecodeReferenceToken(std::string_view encoded_token, std::string& output) {
output.clear();
output.reserve(encoded_token.size());
for (size_t i = 0; i < encoded_token.size(); ++i) {
char c = encoded_token[i];
switch (c) {
case '~':
++i;
assert(i != encoded_token.size());
output += DecodeEscape(encoded_token[i]);
break;
default:
output += c;
}
}
}
}
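// Lexicographically compares two JSON Pointers by their decoded reference
// tokens ('~0' -> '~', '~1' -> '/').  In addition to a total order, the result
// distinguishes the prefix cases: kContains when `a` refers to an ancestor of
// `b`, and kContainedIn when `a` refers to a descendant of `b`.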
CompareResult Compare(std::string_view a, std::string_view b) {
const size_t mismatch_index = std::distance(
a.begin(), std::mismatch(a.begin(), a.end(), b.begin(), b.end()).first);
if (mismatch_index == a.size()) {
if (mismatch_index == b.size()) return kEqual;
if (b[mismatch_index] == '/') {
return kContains;
}
return kLessThan;
}
if (mismatch_index == b.size()) {
if (a[mismatch_index] == '/') {
return kContainedIn;
}
return kGreaterThan;
}
if (a[mismatch_index] == '/') {
return kLessThan;
}
if (b[mismatch_index] == '/') {
return kGreaterThan;
}
unsigned char a_char, b_char;
if (a[mismatch_index - 1] == '~') {
assert(mismatch_index > 0);
a_char = DecodeEscape(a[mismatch_index]);
b_char = DecodeEscape(b[mismatch_index]);
} else {
if (a[mismatch_index] == '~') {
assert(mismatch_index + 1 < a.size());
a_char = DecodeEscape(a[mismatch_index + 1]);
} else {
a_char = a[mismatch_index];
}
if (b[mismatch_index] == '~') {
assert(mismatch_index + 1 < b.size());
b_char = DecodeEscape(b[mismatch_index + 1]);
} else {
b_char = b[mismatch_index];
}
}
return a_char < b_char ? kLessThan : kGreaterThan;
}
std::string EncodeReferenceToken(std::string_view token) {
std::string result;
result.reserve(token.size());
for (char c : token) {
switch (c) {
case '~':
result += {'~', '0'};
break;
case '/':
result += {'~', '1'};
break;
default:
result += c;
}
}
return result;
}
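// Resolves `sub_value_pointer` within `full_value` one reference token at a
// time.  Missing members are handled according to `mode`: kMustExist returns
// NotFound, kCreate inserts discarded placeholders, kSimulateCreate returns
// nullptr without modifying the document, and kDelete erases the leaf (if
// present) and returns nullptr.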
Result<::nlohmann::json*> Dereference(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode) {
if (sub_value_pointer.empty()) {
if (full_value.is_discarded()) {
if (mode == kMustExist) {
return absl::NotFoundError("");
}
if (mode == kDelete) {
return nullptr;
}
}
return &full_value;
}
assert(sub_value_pointer[0] == '/');
size_t i = 1;
auto* sub_value = &full_value;
std::string decoded_reference_token;
while (true) {
if (sub_value->is_discarded()) {
switch (mode) {
case kMustExist:
return absl::NotFoundError("");
case kCreate:
*sub_value = ::nlohmann::json::object_t();
break;
case kSimulateCreate:
case kDelete:
return nullptr;
}
}
size_t pointer_component_end = sub_value_pointer.find('/', i);
const bool is_leaf = pointer_component_end == std::string_view::npos;
const auto quoted_pointer = [&] {
return tensorstore::QuoteString(
sub_value_pointer.substr(0, pointer_component_end));
};
std::string_view pointer_component =
sub_value_pointer.substr(i, pointer_component_end - i);
if (auto* j_obj = sub_value->get_ptr<::nlohmann::json::object_t*>()) {
DecodeReferenceToken(pointer_component, decoded_reference_token);
if (mode == kCreate) {
sub_value = &j_obj
->emplace(decoded_reference_token,
::nlohmann::json::value_t::discarded)
.first->second;
} else if (mode == kDelete && is_leaf) {
j_obj->erase(decoded_reference_token);
return nullptr;
} else {
auto it = j_obj->find(decoded_reference_token);
if (it == j_obj->end()) {
switch (mode) {
case kSimulateCreate:
case kDelete:
return nullptr;
case kMustExist:
return absl::NotFoundError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" refers to non-existent object member"));
case kCreate:
ABSL_UNREACHABLE();
}
}
sub_value = &it->second;
}
} else if (auto* j_array =
sub_value->get_ptr<::nlohmann::json::array_t*>()) {
if (pointer_component == "-") {
switch (mode) {
case kMustExist:
return absl::FailedPreconditionError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" refers to non-existent array element"));
case kCreate:
sub_value =
&j_array->emplace_back(::nlohmann::json::value_t::discarded);
break;
case kSimulateCreate:
case kDelete:
return nullptr;
}
} else {
size_t array_index;
if (pointer_component.empty() ||
std::any_of(pointer_component.begin(), pointer_component.end(),
[](char c) { return !absl::ascii_isdigit(c); }) ||
(pointer_component.size() > 1 && pointer_component[0] == '0') ||
!absl::SimpleAtoi(pointer_component, &array_index)) {
return absl::FailedPreconditionError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" is invalid for array value"));
}
if (array_index >= j_array->size()) {
if (mode == kDelete) return nullptr;
return absl::OutOfRangeError(tensorstore::StrCat(
"JSON Pointer ", quoted_pointer(),
" is out-of-range for array of size ", j_array->size()));
}
if (mode == kDelete && is_leaf) {
j_array->erase(j_array->begin() + array_index);
return nullptr;
}
sub_value = &(*j_array)[array_index];
}
} else {
return absl::FailedPreconditionError(tensorstore::StrCat(
"JSON Pointer reference ", quoted_pointer(), " cannot be applied to ",
sub_value->type_name(), " value: ", *sub_value));
}
if (pointer_component_end == std::string_view::npos) {
assert(mode != kDelete);
return sub_value;
}
i += pointer_component.size() + 1;
}
}
Result<const ::nlohmann::json*> Dereference(const ::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode) {
assert(mode == kMustExist || mode == kSimulateCreate);
return json_pointer::Dereference(const_cast<::nlohmann::json&>(full_value),
sub_value_pointer, mode);
}
absl::Status Replace(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
::nlohmann::json new_sub_value) {
if (sub_value_pointer.empty()) {
full_value = std::move(new_sub_value);
return absl::OkStatus();
}
if (!new_sub_value.is_discarded()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto* sub_value,
json_pointer::Dereference(full_value, sub_value_pointer, kCreate));
*sub_value = std::move(new_sub_value);
return absl::OkStatus();
}
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(full_value, sub_value_pointer, kDelete));
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/json_pointer.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::json_pointer::Compare;
using ::tensorstore::json_pointer::CompareResult;
using ::tensorstore::json_pointer::Dereference;
using ::tensorstore::json_pointer::EncodeReferenceToken;
using ::tensorstore::json_pointer::kCreate;
using ::tensorstore::json_pointer::kDelete;
using ::tensorstore::json_pointer::kMustExist;
using ::tensorstore::json_pointer::kSimulateCreate;
using ::tensorstore::json_pointer::Replace;
using ::tensorstore::json_pointer::Validate;
using ::testing::Optional;
using ::testing::Pointee;
TEST(ValidateTest, Valid) {
TENSORSTORE_EXPECT_OK(Validate(""));
TENSORSTORE_EXPECT_OK(Validate("/"));
TENSORSTORE_EXPECT_OK(Validate("/a/"));
TENSORSTORE_EXPECT_OK(Validate("/abc"));
TENSORSTORE_EXPECT_OK(Validate("/abc/"));
TENSORSTORE_EXPECT_OK(Validate("/abc/def"));
TENSORSTORE_EXPECT_OK(Validate("/abc/def/xy~0/~1"));
}
TEST(ValidateTest, Invalid) {
EXPECT_THAT(Validate("foo"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"JSON Pointer does not start with '/': \"foo\""));
EXPECT_THAT(
Validate("/~~"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~~\""));
EXPECT_THAT(
Validate("/~"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~\""));
EXPECT_THAT(
Validate(std::string_view("/~0", 2)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~\""));
}
TEST(CompareTest, Basic) {
EXPECT_EQ(Compare("", ""), CompareResult::kEqual);
EXPECT_EQ(Compare("", "/foo"), CompareResult::kContains);
EXPECT_EQ(Compare("/foo", ""), CompareResult::kContainedIn);
EXPECT_EQ(Compare("/a", "/b"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a", "/ab"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a/b", "/acc"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/acc", "/a/b"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a*c", "/a/b"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/ab", "/a"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~0", "/a~1"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~1", "/a~0"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a~0", "/ax"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~1", "/ax"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/ax", "/a~0"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/ax", "/a~1"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/xx", "/xx/abc"), CompareResult::kContains);
EXPECT_EQ(Compare("/xx/abc", "/xx"), CompareResult::kContainedIn);
EXPECT_EQ(Compare("/abc", "/acc"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/b", "/a"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/ba", "/ab"), CompareResult::kGreaterThan);
}
TEST(EncodeReferenceTokenTest, Basic) {
EXPECT_EQ("", EncodeReferenceToken(""));
EXPECT_EQ("abc", EncodeReferenceToken("abc"));
EXPECT_EQ("abc~0", EncodeReferenceToken("abc~"));
EXPECT_EQ("abc~1", EncodeReferenceToken("abc/"));
EXPECT_EQ("abc~1~0xyz", EncodeReferenceToken("abc/~xyz"));
}
TEST(DereferenceTest, ExamplesFromRfc6901) {
::nlohmann::json document = {
{"foo", {"bar", "baz"}},
{"", 0},
{"a/b", 1},
{"c%d", 2},
{"e^f", 3},
{"g|h", 4},
{"i\\j", 5},
{"k\"l", 6},
{" ", 7},
{"m~n", 8},
};
EXPECT_THAT(Dereference(document, "", kMustExist), Optional(&document));
EXPECT_THAT(Dereference(document, "/foo", kMustExist),
Optional(Pointee(::nlohmann::json{"bar", "baz"})));
EXPECT_THAT(Dereference(document, "/foo/0", kMustExist),
Optional(Pointee(::nlohmann::json("bar"))));
EXPECT_THAT(Dereference(document, "/", kMustExist), Optional(Pointee(0)));
EXPECT_THAT(Dereference(document, "/a~1b", kMustExist), Optional(Pointee(1)));
EXPECT_THAT(Dereference(document, "/c%d", kMustExist), Optional(Pointee(2)));
EXPECT_THAT(Dereference(document, "/e^f", kMustExist), Optional(Pointee(3)));
EXPECT_THAT(Dereference(document, "/g|h", kMustExist), Optional(Pointee(4)));
EXPECT_THAT(Dereference(document, "/i\\j", kMustExist), Optional(Pointee(5)));
EXPECT_THAT(Dereference(document, "/k\"l", kMustExist), Optional(Pointee(6)));
EXPECT_THAT(Dereference(document, "/ ", kMustExist), Optional(Pointee(7)));
EXPECT_THAT(Dereference(document, "/m~0n", kMustExist), Optional(Pointee(8)));
}
TEST(DereferenceTest, ConstAccess) {
EXPECT_THAT(Dereference(true, "", kMustExist), Optional(Pointee(true)));
EXPECT_THAT(Dereference(true, "/", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/\" cannot be applied to "
"boolean value: true"));
EXPECT_THAT(
Dereference(true, "/a/b/c", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied to "
"boolean value: true"));
EXPECT_THAT(Dereference({1, 2, 3}, "/0", kMustExist), Optional(Pointee(1)));
EXPECT_THAT(Dereference({1, 2, 3}, "/1", kMustExist), Optional(Pointee(2)));
EXPECT_THAT(
Dereference({1, 2, 3}, "/3", kMustExist),
MatchesStatus(absl::StatusCode::kOutOfRange,
"JSON Pointer \"/3\" is out-of-range for array of size 3"));
EXPECT_THAT(Dereference({1, 2, 3}, "/a", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/a\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/ 1", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/ 1\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/00", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/00\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/-", kMustExist),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/-\" refers to non-existent array element"));
EXPECT_THAT(Dereference({1, {{"a", 7}, {"b", 8}}, 3}, "/1/a", kMustExist),
Optional(Pointee(7)));
EXPECT_THAT(
Dereference({1, {{"a", 7}, {"b", 8}}, 3}, "/1/c", kMustExist),
MatchesStatus(
absl::StatusCode::kNotFound,
"JSON Pointer \"/1/c\" refers to non-existent object member"));
EXPECT_THAT(
Dereference({1, {{"a", 7}, {"b", 8}}, 3}, "/1/c", kMustExist),
MatchesStatus(
absl::StatusCode::kNotFound,
"JSON Pointer \"/1/c\" refers to non-existent object member"));
EXPECT_THAT(
Dereference(::nlohmann::json::value_t::discarded, "/a/b", kMustExist),
MatchesStatus(absl::StatusCode::kNotFound, ""));
}
TEST(DereferenceTest, NonConstAccess) {
{
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Dereference(doc, "/-", kCreate));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, 2, 3, ::nlohmann::json::value_t::discarded}));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kCreate), Optional(&doc));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kMustExist),
MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kDelete), Optional(nullptr));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "/a", kDelete), Optional(nullptr));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kSimulateCreate), Optional(&doc));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
TENSORSTORE_EXPECT_OK(Dereference(doc, "/a/b/c", kCreate));
EXPECT_THAT(
doc,
MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", ::nlohmann::json::value_t::discarded}}}}}}));
}
{
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Dereference(doc, "/-/x", kCreate));
EXPECT_THAT(doc,
MatchesJson(::nlohmann::json{
1, 2, 3, {{"x", ::nlohmann::json::value_t::discarded}}}));
}
{
::nlohmann::json doc{1, 2, 3};
EXPECT_THAT(
Dereference(doc, "/-/a", kMustExist),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/-\" refers to non-existent array element"));
}
}
TEST(ReplaceTest, ReplaceEntireValue) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "", 42));
EXPECT_THAT(doc, MatchesJson(42));
}
TEST(ReplaceTest, DeleteEntireValue) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
TEST(ReplaceTest, ReplaceArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 42, 3}));
}
TEST(ReplaceTest, ReplaceNestedWithinArrayElement) {
::nlohmann::json doc{1, {{"a", 2}}, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1/a", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, {{"a", 42}}, 3}));
}
TEST(ReplaceTest, DeleteNestedWithinArrayElement) {
::nlohmann::json doc{1, {{"a", 2}}, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/1/a", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(
doc, MatchesJson(::nlohmann::json{1, ::nlohmann::json::object_t(), 3}));
}
TEST(ReplaceTest, AppendNestedMember) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/-/a/b/c", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, 2, 3, {{"a", {{"b", {{"c", 42}}}}}}}));
}
TEST(ReplaceTest, ReplaceNestedMember) {
::nlohmann::json doc{1, {{"d", false}}, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1/a/b/c", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, {{"a", {{"b", {{"c", 42}}}}}, {"d", false}}, 3}));
}
TEST(ReplaceTest, DeleteNestedMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/b/c", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc,
MatchesJson(::nlohmann::json{
{"a", {{"b", ::nlohmann::json::object_t()}}}, {"d", false}}));
}
TEST(ReplaceTest, DeleteMissingMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/e", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{{"a", {{"b", {{"c", 42}}}}},
{"d", false}}));
}
TEST(ReplaceTest, DeleteMissingNestedMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/e/f", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{{"a", {{"b", {{"c", 42}}}}},
{"d", false}}));
}
TEST(ReplaceTest, DeleteArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/1", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 3}));
}
TEST(ReplaceTest, DeleteNewArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/-", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 2, 3}));
}
TEST(ReplaceTest, DeleteOutOfRangeArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/4", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 2, 3}));
}
TEST(ReplaceTest, DeleteInvalidElement) {
::nlohmann::json doc(false);
EXPECT_THAT(Replace(doc, "/4", ::nlohmann::json::value_t::discarded),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/4\" cannot be applied "
"to boolean value: false"));
EXPECT_THAT(doc, MatchesJson(false));
}
} |
645 | cpp | google/tensorstore | utf8 | tensorstore/internal/utf8.cc | tensorstore/internal/utf8_test.cc | #ifndef TENSORSTORE_INTERNAL_UTF8_H_
#define TENSORSTORE_INTERNAL_UTF8_H_
#include <string_view>
namespace tensorstore {
namespace internal {
bool IsValidUtf8(std::string_view code_units);
}
}
#endif
#include "tensorstore/internal/utf8.h"
#include <cstdint>
#include <string_view>
namespace tensorstore {
namespace internal {
namespace {
namespace utf8_decode {
using State = uint32_t;
constexpr State kAccept = 0;
#if 0
constexpr State kReject = 1;
#endif
const uint8_t utf8d[400] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
0xa,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x4,0x3,0x3,
0xb,0x6,0x6,0x6,0x5,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,
0x0,0x1,0x2,0x3,0x5,0x8,0x7,0x1,0x1,0x1,0x4,0x6,0x1,0x1,0x1,0x1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1,
1,2,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,
1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,3,1,3,1,1,1,1,1,1,
1,3,1,1,1,1,1,3,1,3,1,1,1,1,1,1,1,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
};
inline State Decode(State* state, char32_t* codep, uint8_t byte) {
uint32_t type = utf8d[byte];
*codep = (*state != kAccept) ? (byte & 0x3fu) | (*codep << 6)
: (0xff >> type) & (byte);
*state = utf8d[256 + *state * 16 + type];
return *state;
}
}
}
bool IsValidUtf8(std::string_view code_units) {
using utf8_decode::kAccept;
utf8_decode::State state = utf8_decode::kAccept;
char32_t codep;
for (const char x : code_units) {
utf8_decode::Decode(&state, &codep, x);
}
return state == kAccept;
}
}
} | #include "tensorstore/internal/utf8.h"
#include <string_view>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::IsValidUtf8;
TEST(IsValidUtf8Test, Empty) {
EXPECT_TRUE(IsValidUtf8(""));
}
TEST(IsValidUtf8Test, Ascii) {
EXPECT_TRUE(IsValidUtf8("ascii"));
EXPECT_TRUE(IsValidUtf8(std::string_view("\0", 1)));
}
TEST(IsValidUtf8Test, TwoByte) {
EXPECT_TRUE(IsValidUtf8("\xc2\x80"));
EXPECT_TRUE(IsValidUtf8("\xc2\x80hello\xc2\xbf"));
}
TEST(IsValidUtf8Test, ThreeByte) {
EXPECT_TRUE(IsValidUtf8("\xe0\xa0\x80"));
}
TEST(IsValidUtf8Test, FourByte) {
EXPECT_TRUE(IsValidUtf8("\xf0\x90\x80\x80"));
}
TEST(IsValidUtf8Test, Surrogate) {
EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80"));
EXPECT_FALSE(IsValidUtf8("\xed\xb0\x80"));
EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80\xed\xb0\x80"));
}
TEST(IsValidUtf8Test, IllFormedFirstByte) {
EXPECT_FALSE(IsValidUtf8("\x80"));
EXPECT_FALSE(IsValidUtf8("\xC1"));
EXPECT_FALSE(IsValidUtf8("\xF5"));
EXPECT_FALSE(IsValidUtf8("\xFF"));
}
TEST(IsValidUtf8Test, OverlongNul) {
EXPECT_FALSE(IsValidUtf8("\xc0\x80"));
EXPECT_FALSE(IsValidUtf8("\xe0\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xf0\x80\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xf8\x80\x80\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xfc\x80\x80\x80\x80\x80"));
}
} |
646 | cpp | google/tensorstore | decoded_matches | tensorstore/internal/decoded_matches.cc | tensorstore/internal/decoded_matches_test.cc | #ifndef TENSORSTORE_INTERNAL_DECODED_MATCHES_H_
#define TENSORSTORE_INTERNAL_DECODED_MATCHES_H_
#include <functional>
#include <string>
#include <string_view>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
::testing::Matcher<absl::Cord> DecodedMatches(
::testing::Matcher<std::string_view> value_matcher,
std::function<Result<std::string>(std::string_view)> decoder);
}
}
#endif
#include "tensorstore/internal/decoded_matches.h"
#include <functional>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
using DecodeFunction = std::function<Result<std::string>(std::string_view)>;
class Matcher : public ::testing::MatcherInterface<absl::Cord> {
public:
Matcher(::testing::Matcher<std::string_view> value_matcher,
DecodeFunction decoder)
: value_matcher_(std::move(value_matcher)),
decoder_(std::move(decoder)) {}
bool MatchAndExplain(
absl::Cord value,
::testing::MatchResultListener* listener) const override {
auto decoded = decoder_(value.Flatten());
if (!decoded.ok()) {
*listener << "Failed to decode value: " << decoded.status();
return false;
}
return value_matcher_.MatchAndExplain(*decoded, listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "when decoded ";
value_matcher_.DescribeTo(os);
}
private:
::testing::Matcher<std::string_view> value_matcher_;
DecodeFunction decoder_;
};
}
::testing::Matcher<absl::Cord> DecodedMatches(
::testing::Matcher<std::string_view> value_matcher,
DecodeFunction decoder) {
return ::testing::MakeMatcher(
new Matcher(std::move(value_matcher), std::move(decoder)));
}
}
} | #include "tensorstore/internal/decoded_matches.h"
#include <cstddef>
#include <sstream>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal::DecodedMatches;
tensorstore::Result<std::string> Stride2Decoder(std::string_view input) {
if (input.size() % 2 != 0) {
return absl::InvalidArgumentError("");
}
std::string output;
for (size_t i = 0; i < input.size(); i += 2) {
output += input[i];
}
return output;
}
TEST(DecodedMatchesTest, Describe) {
std::ostringstream ss;
DecodedMatches("x", Stride2Decoder).DescribeTo(&ss);
EXPECT_EQ("when decoded is equal to \"x\"", ss.str());
}
TEST(DecodedMatchesTest, ExplainValueMatcher) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
DecodedMatches(::testing::SizeIs(3), Stride2Decoder), absl::Cord("xy"),
&listener);
EXPECT_EQ("whose size 1 doesn't match", listener.str());
}
TEST(DecodedMatchesTest, ExplainDecodeError) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(DecodedMatches("x", Stride2Decoder),
absl::Cord("xyz"), &listener);
EXPECT_EQ("Failed to decode value: INVALID_ARGUMENT: ", listener.str());
}
TEST(DecodedMatchesTest, Matches) {
EXPECT_THAT(absl::Cord("abcd"), DecodedMatches("ac", Stride2Decoder));
EXPECT_THAT(absl::Cord("abc"),
::testing::Not(DecodedMatches("ac", Stride2Decoder)));
EXPECT_THAT(absl::Cord("abcd"),
::testing::Not(DecodedMatches("ab", Stride2Decoder)));
EXPECT_THAT(absl::Cord("abcd"),
DecodedMatches(::testing::Not("ab"), Stride2Decoder));
}
} |
647 | cpp | google/tensorstore | masked_array | tensorstore/internal/masked_array.cc | tensorstore/internal/masked_array_test.cc | #ifndef TENSORSTORE_INTERNAL_MASKED_ARRAY_H_
#define TENSORSTORE_INTERNAL_MASKED_ARRAY_H_
#include <algorithm>
#include <memory>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
struct MaskData {
explicit MaskData(DimensionIndex rank);
void Reset() {
num_masked_elements = 0;
mask_array.reset();
region.Fill(IndexInterval::UncheckedSized(0, 0));
}
std::unique_ptr<bool[], FreeDeleter> mask_array;
Index num_masked_elements = 0;
Box<> region;
};
void WriteToMask(MaskData* mask, BoxView<> output_box,
IndexTransformView<> input_to_output, Arena* arena);
void RebaseMaskedArray(BoxView<> box, ArrayView<const void> source,
ArrayView<void> dest, const MaskData& mask);
void UnionMasks(BoxView<> box, MaskData* mask_a, MaskData* mask_b);
}
}
#endif
#include "tensorstore/internal/masked_array.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
struct SetMask {
void operator()(bool* x, void*) const { *x = true; }
};
struct SetMaskAndCountChanged {
Index num_changed = 0;
void operator()(bool* x) {
if (!*x) {
++num_changed;
*x = true;
}
}
};
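// Returns whether the hull of `a` and `b` covers exactly the same elements as
// their union, i.e. no element of the hull lies outside both boxes; in that
// case the mask can be represented by a region alone instead of a bool array.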
bool IsHullEqualToUnion(BoxView<> a, BoxView<> b) {
assert(a.rank() == b.rank());
Index hull_num_elements = 1, a_num_elements = 1, b_num_elements = 1,
intersection_num_elements = 1;
for (DimensionIndex i = 0; i < a.rank(); ++i) {
IndexInterval a_interval = a[i], b_interval = b[i];
IndexInterval hull = Hull(a_interval, b_interval);
IndexInterval intersection = Intersect(a_interval, b_interval);
hull_num_elements *= hull.size();
a_num_elements *= a_interval.size();
b_num_elements *= b_interval.size();
intersection_num_elements *= intersection.size();
}
return (hull_num_elements ==
a_num_elements + b_num_elements - intersection_num_elements);
}
void Hull(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Hull(a[i], b[i]);
}
}
void Intersect(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Intersect(a[i], b[i]);
}
}
Index GetRelativeOffset(span<const Index> base, span<const Index> position,
span<const Index> strides) {
const DimensionIndex rank = base.size();
assert(rank == position.size());
assert(rank == strides.size());
Index result = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
result = internal::wrap_on_overflow::Add(
result, internal::wrap_on_overflow::Multiply(
strides[i], internal::wrap_on_overflow::Subtract(
position[i], base[i])));
}
return result;
}
void RemoveMaskArrayIfNotNeeded(MaskData* mask) {
if (mask->num_masked_elements == mask->region.num_elements()) {
mask->mask_array.reset();
}
}
}
MaskData::MaskData(DimensionIndex rank) : region(rank) {
region.Fill(IndexInterval::UncheckedSized(0, 0));
}
std::unique_ptr<bool[], FreeDeleter> CreateMaskArray(
BoxView<> box, BoxView<> mask_region, span<const Index> byte_strides) {
std::unique_ptr<bool[], FreeDeleter> result(
static_cast<bool*>(std::calloc(box.num_elements(), sizeof(bool))));
ByteStridedPointer<bool> start = result.get();
start += GetRelativeOffset(box.origin(), mask_region.origin(), byte_strides);
internal::IterateOverArrays(
internal::SimpleElementwiseFunction<SetMask(bool), void*>{},
nullptr,
skip_repeated_elements,
ArrayView<bool>(start.get(),
StridedLayoutView<>(mask_region.shape(), byte_strides)));
return result;
}
void CreateMaskArrayFromRegion(BoxView<> box, MaskData* mask,
span<const Index> byte_strides) {
assert(mask->num_masked_elements == mask->region.num_elements());
mask->mask_array = CreateMaskArray(box, mask->region, byte_strides);
}
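// Merges `mask_b` into `mask_a`.  An explicit bool mask array is only
// materialized when the union of the two masked regions cannot be represented
// exactly by the hull of their bounding boxes.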
void UnionMasks(BoxView<> box, MaskData* mask_a, MaskData* mask_b) {
assert(mask_a != mask_b);
if (mask_a->num_masked_elements == 0) {
std::swap(*mask_a, *mask_b);
return;
} else if (mask_b->num_masked_elements == 0) {
return;
}
const DimensionIndex rank = box.rank();
assert(mask_a->region.rank() == rank);
assert(mask_b->region.rank() == rank);
if (mask_a->mask_array && mask_b->mask_array) {
const Index size = box.num_elements();
mask_a->num_masked_elements = 0;
for (Index i = 0; i < size; ++i) {
if ((mask_a->mask_array[i] |= mask_b->mask_array[i])) {
++mask_a->num_masked_elements;
}
}
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
return;
}
if (!mask_a->mask_array && !mask_b->mask_array) {
if (IsHullEqualToUnion(mask_a->region, mask_b->region)) {
Hull(mask_a->region, mask_b->region, mask_a->region);
mask_a->num_masked_elements = mask_a->region.num_elements();
return;
}
} else if (!mask_a->mask_array) {
std::swap(*mask_a, *mask_b);
}
Index byte_strides[kMaxRank];
const span<Index> byte_strides_span(&byte_strides[0], rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
byte_strides_span);
if (!mask_a->mask_array) {
CreateMaskArrayFromRegion(box, mask_a, byte_strides_span);
}
ByteStridedPointer<bool> start = mask_a->mask_array.get();
start += GetRelativeOffset(box.origin(), mask_b->region.origin(),
byte_strides_span);
IterateOverArrays(
[&](bool* ptr) {
if (!*ptr) ++mask_a->num_masked_elements;
*ptr = true;
},
{},
ArrayView<bool>(start.get(), StridedLayoutView<>(mask_b->region.shape(),
byte_strides_span)));
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
}
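// Copies the elements of `source` that are not covered by `mask` into `dest`;
// masked (already written) positions keep their current values in `dest`.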
void RebaseMaskedArray(BoxView<> box, ArrayView<const void> source,
ArrayView<void> dest, const MaskData& mask) {
assert(source.dtype() == dest.dtype());
assert(internal::RangesEqual(box.shape(), source.shape()));
assert(internal::RangesEqual(box.shape(), dest.shape()));
const Index num_elements = box.num_elements();
if (mask.num_masked_elements == num_elements) return;
DataType dtype = source.dtype();
if (mask.num_masked_elements == 0) {
[[maybe_unused]] const auto success = internal::IterateOverArrays(
{&dtype->copy_assign, nullptr},
nullptr, skip_repeated_elements, source, dest);
assert(success);
return;
}
Index mask_byte_strides_storage[kMaxRank];
const span<Index> mask_byte_strides(&mask_byte_strides_storage[0],
box.rank());
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
mask_byte_strides);
std::unique_ptr<bool[], FreeDeleter> mask_owner;
bool* mask_array_ptr;
if (!mask.mask_array) {
mask_owner = CreateMaskArray(box, mask.region, mask_byte_strides);
mask_array_ptr = mask_owner.get();
} else {
mask_array_ptr = mask.mask_array.get();
}
ArrayView<const bool> mask_array(
mask_array_ptr, StridedLayoutView<>(box.shape(), mask_byte_strides));
[[maybe_unused]] const auto success = internal::IterateOverArrays(
{&dtype->copy_assign_unmasked, nullptr},
nullptr, skip_repeated_elements, source, dest, mask_array);
assert(success);
}
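// Marks the positions of `output_box` selected by `input_to_output` as
// written.  A full bool mask array is only materialized when the newly written
// range cannot be merged with the existing masked region into a single box.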
void WriteToMask(MaskData* mask, BoxView<> output_box,
IndexTransformView<> input_to_output, Arena* arena) {
assert(input_to_output.output_rank() == output_box.rank());
if (input_to_output.domain().box().is_empty()) {
return;
}
const DimensionIndex output_rank = output_box.rank();
Box<dynamic_rank(kNumInlinedDims)> output_range(output_rank);
const bool range_is_exact =
GetOutputRange(input_to_output, output_range).value();
Intersect(output_range, output_box, output_range);
Index mask_byte_strides_storage[kMaxRank];
const span<Index> mask_byte_strides(&mask_byte_strides_storage[0],
output_rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), output_box.shape(),
mask_byte_strides);
StridedLayoutView<dynamic_rank, offset_origin> mask_layout(output_box,
mask_byte_strides);
const bool use_mask_array =
output_box.rank() != 0 &&
mask->num_masked_elements != output_box.num_elements() &&
(static_cast<bool>(mask->mask_array) ||
(!Contains(mask->region, output_range) &&
(!range_is_exact || !IsHullEqualToUnion(mask->region, output_range))));
if (use_mask_array && !mask->mask_array) {
CreateMaskArrayFromRegion(output_box, mask, mask_byte_strides);
}
Hull(mask->region, output_range, mask->region);
if (use_mask_array) {
auto mask_iterable =
GetTransformedArrayNDIterable(
ArrayView<Shared<bool>, dynamic_rank, offset_origin>(
AddByteOffset(SharedElementPointer<bool>(
UnownedToShared(mask->mask_array.get())),
-IndexInnerProduct(output_box.origin(),
span(mask_byte_strides))),
mask_layout),
input_to_output, arena)
.value();
SetMaskAndCountChanged set_mask_context;
constexpr ElementwiseFunction<1> set_mask_func =
internal::SimpleElementwiseFunction<SetMaskAndCountChanged(bool)>();
auto status = internal::IterateOverNDIterables<1, true>(
input_to_output.input_shape(), skip_repeated_elements,
{{mask_iterable.get()}}, arena, {&set_mask_func, &set_mask_context});
mask->num_masked_elements += set_mask_context.num_changed;
status.IgnoreError();
assert(status.ok());
} else {
mask->num_masked_elements = mask->region.num_elements();
}
}
}
} | #include "tensorstore/internal/masked_array.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/masked_array_testutil.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::ArrayView;
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeArrayView;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::offset_origin;
using ::tensorstore::SharedArray;
using ::tensorstore::span;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::ElementCopyFunction;
using ::tensorstore::internal::MaskData;
using ::tensorstore::internal::SimpleElementwiseFunction;
class MaskedArrayTester {
public:
explicit MaskedArrayTester(BoxView<> box)
: box_(box),
mask_(box.rank()),
mask_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(bool), box.shape()) {}
ArrayView<const bool> mask_array() const {
if (!mask_.mask_array) return {};
return ArrayView<const bool>(mask_.mask_array.get(),
mask_layout_zero_origin_);
}
Index num_masked_elements() const { return mask_.num_masked_elements; }
BoxView<> mask_region() const { return mask_.region; }
const MaskData& mask() const { return mask_; }
BoxView<> domain() const { return box_; }
void Combine(MaskedArrayTester&& other) {
UnionMasks(box_, &mask_, &other.mask_);
}
void Reset() { mask_.Reset(); }
protected:
Box<> box_;
MaskData mask_;
StridedLayout<> mask_layout_zero_origin_;
};
template <typename T>
class MaskedArrayWriteTester : public MaskedArrayTester {
public:
explicit MaskedArrayWriteTester(BoxView<> box)
: MaskedArrayTester(box),
dest_(tensorstore::AllocateArray<T>(box, tensorstore::c_order,
tensorstore::value_init)),
dest_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(T), box.shape()) {}
template <typename CopyFunc>
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source, CopyFunc&& copy_func) {
ElementCopyFunction copy_function =
SimpleElementwiseFunction<std::remove_reference_t<CopyFunc>(const T, T),
void*>();
return WriteToMaskedArray(dest_.byte_strided_origin_pointer().get(), &mask_,
dest_.domain(), dest_transform, source,
                              {&copy_function, &copy_func});
}
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source) {
return Write(dest_transform, source,
[](const T* source, T* dest, void*) { *dest = *source; });
}
void Rebase(ArrayView<const T> source) {
RebaseMaskedArray(
box_, source,
tensorstore::ArrayOriginCast<tensorstore::zero_origin>(dest_).value(),
mask_);
}
IndexTransform<> transform() const {
return tensorstore::IdentityTransform(dest_.domain());
}
ArrayView<const T> dest_array() const {
return ArrayView<const T>(dest_.byte_strided_origin_pointer().get(),
dest_layout_zero_origin_);
}
private:
SharedArray<T, dynamic_rank, offset_origin> dest_;
StridedLayout<> dest_layout_zero_origin_;
};
TEST(MaskDataTest, Construct) {
MaskData mask(3);
EXPECT_FALSE(mask.mask_array);
EXPECT_EQ(0, mask.num_masked_elements);
EXPECT_EQ(0, mask.region.num_elements());
}
TEST(WriteToMaskedArrayTest, RankZero) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(tester.transform(), MakeScalarArray(5)));
EXPECT_EQ(1, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(5), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankZeroError) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
EXPECT_THAT(
tester.Write(
tester.transform(), MakeScalarArray(5),
[](const int* source, int* dest, void* status) { return false; }),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneNoElementsWritten) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).AddNew().SizedInterval(0, 0)).value(),
MakeArrayView(span<const int>{})));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOne) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {10})};
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({2}, {1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 0, 0, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(5, 2)).value(),
MakeArray({4, 5})));
EXPECT_EQ(5, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArrayView({0, 1, 2, 3, 4, 5, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(9, 2)).value(),
MakeArray({6, 7})));
EXPECT_EQ(7, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {9}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({0, 1, 1, 1, 1, 1, 0, 0, 1, 1}),
tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 4, 5, 0, 0, 6, 7}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneStrided) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {8})};
auto input_to_output = IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({3})
.output_single_input_dimension(0, -2, 2, 0)
.Finalize()
.value();
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).SizedInterval(2, 3, 2).TranslateTo(0))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(MakeArray<bool>({0, 1, 0, 1, 0, 1, 0, 0}), tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 0, 2, 0, 3, 0, 0}), tester.dest_array());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
}
TEST(WriteToMaskedArrayTest, RankTwo) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 2}, {3, 2}))
.value(),
MakeArray({
{7, 8},
{9, 0},
{1, 2},
})));
EXPECT_EQ(9, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 0, 0},
{1, 2, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({3, 5}, {2, 2}))
.value(),
MakeArray({
{5, 6},
{7, 8},
})));
EXPECT_EQ(13, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({
{0, 0, 0, 0, 0},
{1, 1, 1, 0, 0},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
}),
tester.mask_array());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 5, 6},
{1, 2, 6, 7, 8},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoNonExactContainedInExistingMaskRegion) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {2, 2}, {2, 1}))
.value(),
MakeArray({
{7, 8},
{9, 0},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 7, 8, 0, 0},
{0, 3, 4, 0, 0},
{0, 9, 0, 0, 0},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoPartialCopy) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
EXPECT_THAT(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
}),
[](const int* source, int* dest, void* arg) {
if (*source == 4) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankTwoIndexArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2, 0, 0},
{0, 3, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 3},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({4, 5, 6})));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 5, 0, 0},
{0, 6, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 1, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
}
TEST(WriteToMaskedArrayTest, RankOneInvalidTransform) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {4})};
EXPECT_THAT(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({1}, {1, 2, 3})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 0, 0, 0}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyDefaultError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
if (*source == 2) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyCustomError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 2) {
*status = absl::UnknownError("My custom error");
return false;
}
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "My custom error"));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(RebaseMaskedArrayTest, Empty) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
tester.Rebase(MakeArray({
{1, 2, 3},
{4, 5, 6},
}));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, Full) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({1, 2}, {2, 3}))
.value(),
MakeArray({
{1, 2, 3},
{4, 5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{7, 7, 7},
{7, 7, 7},
}));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {1, 2}))
.value(),
MakeArray({
{1, 2},
})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0},
{0, 1, 2},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{3, 4, 5},
{6, 1, 2},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
})))
.value(),
MakeArray({1, 2})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2},
{0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 2},
{6, 7, 8},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
}
TEST(UnionMasksTest, FirstEmpty) {
MaskedArrayTester tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, SecondEmpty) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayTester tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 3})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 4})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 0}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {4}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 1, 0, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, NoMaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(ResetTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(BoxView({4}, {2}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(ResetTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
} |
648 | cpp | google/tensorstore | grid_partition_impl | tensorstore/internal/grid_partition_impl.cc | tensorstore/internal/grid_partition_impl_test.cc | #ifndef TENSORSTORE_INTERNAL_GRID_PARTITION_IMPL_H_
#define TENSORSTORE_INTERNAL_GRID_PARTITION_IMPL_H_
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_grid_partition {
class IndexTransformGridPartition {
public:
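  // Connected set whose grid dimensions depend on a single input dimension via
  // `single_input_dimension` output maps only; each grid cell constrains a
  // contiguous interval of that input dimension.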
struct StridedSet {
DimensionSet grid_dimensions;
int input_dimension;
};
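  // Connected set containing at least one index-array output map.  Input
  // positions in the set's domain are grouped by the grid cell they map to:
  // `grid_cell_indices` stores, for each partition, one cell index per grid
  // dimension in the set; each row of `partitioned_input_indices` holds the
  // input coordinates (one column per input dimension in the set) of a single
  // position; and `grid_cell_partition_offsets[i]` is the first row belonging
  // to partition `i`.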
struct IndexArraySet {
DimensionSet grid_dimensions;
DimensionSet input_dimensions;
std::vector<Index> grid_cell_indices;
SharedArray<Index, 2> partitioned_input_indices;
std::vector<Index> grid_cell_partition_offsets;
SharedArray<const Index, 2> partition_input_indices(
Index partition_i) const;
span<const Index> partition_grid_cell_indices(Index partition_i) const;
Index num_partitions() const {
return static_cast<Index>(grid_cell_partition_offsets.size());
}
Index FindPartition(span<const Index> grid_cell_indices) const;
};
span<const IndexArraySet> index_array_sets() const {
return index_array_sets_;
}
auto& index_array_sets() { return index_array_sets_; }
span<const StridedSet> strided_sets() const { return strided_sets_; }
auto& strided_sets() { return strided_sets_; }
IndexTransform<> GetCellTransform(
IndexTransformView<> full_transform, span<const Index> grid_cell_indices,
span<const DimensionIndex> grid_output_dimensions,
absl::FunctionRef<IndexInterval(DimensionIndex grid_dim,
Index grid_cell_index)>
get_grid_cell_output_interval) const;
absl::InlinedVector<StridedSet, internal::kNumInlinedDims> strided_sets_;
std::vector<IndexArraySet> index_array_sets_;
};
internal_index_space::TransformRep::Ptr<> InitializeCellTransform(
const IndexTransformGridPartition& info,
IndexTransformView<> full_transform);
void UpdateCellTransformForIndexArraySetPartition(
const IndexTransformGridPartition::IndexArraySet& index_array_set,
DimensionIndex set_i, Index partition_i,
internal_index_space::TransformRep* cell_transform);
absl::Status PrePartitionIndexTransformOverGrid(
IndexTransformView<> index_transform,
span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformGridPartition& grid_partition);
}
}
#endif
#include "tensorstore/internal/grid_partition_impl.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_grid_partition {
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::TransformRep;
using IndexArraySet = IndexTransformGridPartition::IndexArraySet;
using StridedSet = IndexTransformGridPartition::StridedSet;
SharedArray<const Index, 2>
IndexTransformGridPartition::IndexArraySet::partition_input_indices(
Index partition_i) const {
assert(partition_i >= 0 && partition_i < num_partitions());
SharedArray<const Index, 2> result;
const Index start = grid_cell_partition_offsets[partition_i];
const Index end =
static_cast<size_t>(partition_i + 1) == grid_cell_partition_offsets.size()
? partitioned_input_indices.shape()[0]
: grid_cell_partition_offsets[partition_i + 1];
assert(start >= 0 && start < partitioned_input_indices.shape()[0]);
assert(end > start && end <= partitioned_input_indices.shape()[0]);
result.pointer() =
std::shared_ptr<const Index>(partitioned_input_indices.pointer(),
&partitioned_input_indices(start, 0));
result.layout() = partitioned_input_indices.layout();
result.shape()[0] = end - start;
return result;
}
span<const Index>
IndexTransformGridPartition::IndexArraySet::partition_grid_cell_indices(
Index partition_i) const {
assert(partition_i >= 0 && partition_i < num_partitions());
assert(grid_cell_indices.size() ==
static_cast<size_t>(num_partitions() * grid_dimensions.count()));
return span(&grid_cell_indices[partition_i * grid_dimensions.count()],
grid_dimensions.count());
}
namespace {
struct GridCellIndicesIndirectPartialCompare {
DimensionSet grid_dimensions;
const Index* grid_cell_indices_for_partitions;
Index operator()(Index partition_i, const Index* full_indices) const {
const Index* other_grid_cell_indices =
grid_cell_indices_for_partitions +
partition_i * grid_dimensions.count();
DimensionIndex j = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
Index diff = other_grid_cell_indices[j] - full_indices[grid_dim];
if (diff != 0) {
return diff;
}
++j;
}
return 0;
}
};
}
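// Binary searches the lexicographically ordered per-partition
// `grid_cell_indices` table for the partition whose cell indices (restricted
// to this set's grid dimensions) match `grid_cell_indices`.  Returns -1 if no
// partition maps to that cell.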
Index IndexTransformGridPartition::IndexArraySet::FindPartition(
span<const Index> grid_cell_indices) const {
Index lower = 0, upper = num_partitions();
GridCellIndicesIndirectPartialCompare compare{grid_dimensions,
this->grid_cell_indices.data()};
while (lower != upper) {
Index mid = (lower + upper) / 2;
Index c = compare(mid, grid_cell_indices.data());
if (c == 0) return mid;
if (c > 0) {
upper = mid;
} else {
lower = mid + 1;
}
}
return -1;
}
void UpdateCellTransformForIndexArraySetPartition(
const IndexArraySet& index_array_set, DimensionIndex set_i,
Index partition_i, internal_index_space::TransformRep* cell_transform) {
const SharedArray<const Index, 2> partition_input_indices =
index_array_set.partition_input_indices(partition_i);
cell_transform->input_shape()[set_i] = partition_input_indices.shape()[0];
ByteStridedPointer<const Index> partition_input_indices_ptr =
partition_input_indices.byte_strided_pointer();
const Index vector_dimension_byte_stride =
partition_input_indices.byte_strides()[1];
const span<OutputIndexMap> output_maps = cell_transform->output_index_maps();
for (DimensionIndex full_input_dim :
index_array_set.input_dimensions.index_view()) {
internal_index_space::IndexArrayData& index_array_data =
output_maps[full_input_dim].index_array_data();
index_array_data.element_pointer = std::shared_ptr<const Index>(
partition_input_indices.pointer(), partition_input_indices_ptr);
partition_input_indices_ptr += vector_dimension_byte_stride;
}
}
IndexTransform<> IndexTransformGridPartition::GetCellTransform(
IndexTransformView<> full_transform, span<const Index> grid_cell_indices,
span<const DimensionIndex> grid_output_dimensions,
absl::FunctionRef<IndexInterval(DimensionIndex grid_dim,
Index grid_cell_index)>
get_grid_cell_output_interval) const {
auto cell_transform = InitializeCellTransform(*this, full_transform);
for (DimensionIndex set_i = 0, num_sets = index_array_sets().size();
set_i < num_sets; ++set_i) {
const IndexArraySet& index_array_set = index_array_sets()[set_i];
const Index partition_i = index_array_set.FindPartition(grid_cell_indices);
assert(partition_i != -1);
UpdateCellTransformForIndexArraySetPartition(
index_array_set, set_i, partition_i, cell_transform.get());
}
for (DimensionIndex set_i = 0, num_sets = strided_sets().size();
set_i < num_sets; ++set_i) {
const StridedSet& strided_set = strided_sets()[set_i];
const DimensionIndex cell_input_dim = set_i + index_array_sets().size();
IndexInterval restricted_domain =
full_transform.input_domain()[strided_set.input_dimension];
for (const DimensionIndex grid_dim :
strided_set.grid_dimensions.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
IndexInterval cell_range =
get_grid_cell_output_interval(grid_dim, grid_cell_indices[grid_dim]);
const OutputIndexMapRef<> map =
full_transform.output_index_map(output_dim);
const IndexInterval cell_domain =
GetAffineTransformDomain(cell_range, map.offset(), map.stride())
.value();
restricted_domain = Intersect(restricted_domain, cell_domain);
}
assert(!restricted_domain.empty());
cell_transform->input_origin()[cell_input_dim] =
restricted_domain.inclusive_min();
cell_transform->input_shape()[cell_input_dim] = restricted_domain.size();
}
return internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(cell_transform));
}
namespace {
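// Invokes `set_callback(input_dims, grid_dims, has_array)` once for each
// connected set of grid dimensions, where two grid dimensions belong to the
// same set if they depend on a common input dimension of `transform`;
// `has_array` indicates whether any output map in the set is an index-array
// map.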
template <typename SetCallbackFn>
void ForEachConnectedSet(span<const DimensionIndex> grid_output_dimensions,
IndexTransformView<> transform,
SetCallbackFn set_callback) {
DimensionSet input_dims_for_grid_dims[kMaxRank];
DimensionSet grid_dims_with_array_dependence;
for (DimensionIndex grid_dim = 0; grid_dim < grid_output_dimensions.size();
++grid_dim) {
auto [input_dims, array_dependence] =
internal::GetInputDimensionsForOutputDimension(
transform, grid_output_dimensions[grid_dim]);
input_dims_for_grid_dims[grid_dim] = input_dims;
grid_dims_with_array_dependence[grid_dim] = array_dependence;
}
DimensionSet current_input_dims, current_grid_dims;
DimensionSet remaining_grid_dims{
DimensionSet::UpTo(grid_output_dimensions.size())};
bool current_set_has_array;
const auto add_grid_dim_to_current_set =
[&](DimensionIndex grid_dim) -> DimensionSet {
assert(remaining_grid_dims.test(grid_dim));
assert(grid_dim >= 0 && grid_dim < grid_output_dimensions.size());
remaining_grid_dims.reset(grid_dim);
current_grid_dims.set(grid_dim);
auto input_dims = input_dims_for_grid_dims[grid_dim];
current_set_has_array |= grid_dims_with_array_dependence[grid_dim];
current_input_dims |= input_dims;
return input_dims;
};
const auto is_grid_dim_in_set =
[&](DimensionIndex grid_dim) -> DimensionIndex {
assert(remaining_grid_dims.test(grid_dim));
assert(grid_dim >= 0 && grid_dim < grid_output_dimensions.size());
return !(input_dims_for_grid_dims[grid_dim] & current_input_dims).none();
};
while (!remaining_grid_dims.none()) {
current_input_dims = {};
current_grid_dims = {};
current_set_has_array = false;
if (add_grid_dim_to_current_set(remaining_grid_dims.index_view().front())
.none()) {
continue;
}
for (DimensionIndex grid_dim : remaining_grid_dims.index_view()) {
if (is_grid_dim_in_set(grid_dim)) {
add_grid_dim_to_current_set(grid_dim);
}
}
set_callback(current_input_dims, current_grid_dims, current_set_has_array);
}
}
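// Fills `output` with the arithmetic progression `start, start + stride, ...`
// of `size` values, repeating each value `inner_count` times consecutively and
// repeating the whole progression `outer_count` times, advancing `output` by
// `output_stride` per element written.  Returns the final output iterator.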
template <typename T, typename Stride, typename OutputIt, typename OutputStride>
OutputIt FillWithTiledStridedRange(T start, T size, Stride stride,
Index outer_count, Index inner_count,
OutputIt output,
OutputStride output_stride) {
const T end = start + size * stride;
for (Index outer_i = 0; outer_i < outer_count; ++outer_i) {
for (Index i = start; i != end; i += stride) {
for (Index inner_i = 0; inner_i < inner_count; ++inner_i) {
*output = i;
output += output_stride;
}
}
}
return output;
}
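// For a `single_input_dimension` output map, writes the output index
// `offset + stride * input_index` into `output_indices` (spaced by
// `output_stride`) for every position of the connected set's iteration domain,
// using the tiled fill above so that positions vary in C order over
// `input_dims`.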
absl::Status GenerateSingleInputDimensionOutputIndices(
OutputIndexMapRef<> map, DimensionSet input_dims,
IndexTransformView<> index_transform, Index* output_indices,
Index output_stride) {
assert(map.method() == OutputIndexMethod::single_input_dimension);
const DimensionIndex single_input_dim = map.input_dimension();
const IndexInterval domain = index_transform.input_domain()[single_input_dim];
const Index stride = map.stride();
TENSORSTORE_RETURN_IF_ERROR(
GetAffineTransformRange(domain, map.offset(), stride));
const Index start = map.offset() + stride * domain.inclusive_min();
span<const Index> input_shape = index_transform.input_shape();
Index inner_count = 1;
Index outer_count = 1;
for (DimensionIndex input_dim : input_dims.index_view()) {
if (input_dim == single_input_dim) {
outer_count = inner_count;
inner_count = 1;
} else {
inner_count *= input_shape[input_dim];
}
}
FillWithTiledStridedRange(start, domain.size(), stride, outer_count,
inner_count, output_indices, output_stride);
return absl::OkStatus();
}
absl::Status GenerateIndexArrayOutputIndices(
OutputIndexMapRef<> map, DimensionSet input_dims,
IndexTransformView<> index_transform, Index* output_indices,
Index output_stride) {
assert(map.method() == OutputIndexMethod::array);
const DimensionIndex input_rank = index_transform.input_rank();
Index output_byte_strides[kMaxRank];
std::fill_n(&output_byte_strides[0], input_rank, static_cast<Index>(0));
DimensionIndex byte_stride = sizeof(Index) * output_stride;
Index input_dims_copy[kMaxRank];
DimensionIndex num_input_dims = 0;
for (DimensionIndex input_dim : input_dims.index_view()) {
input_dims_copy[num_input_dims++] = input_dim;
}
for (DimensionIndex i = num_input_dims - 1; i >= 0; --i) {
const DimensionIndex input_dim = input_dims_copy[i];
output_byte_strides[input_dim] = byte_stride;
byte_stride *= index_transform.input_shape()[input_dim];
}
const OutputIndexMapRef<>::IndexArrayView index_array = map.index_array();
TENSORSTORE_RETURN_IF_ERROR(ValidateIndexArrayBounds(
index_array.index_range(), index_array.array_ref()));
const Index stride = map.stride();
const Index offset = map.offset();
IterateOverArrays(
[stride, offset](const Index* source_ptr, Index* output_ptr) {
const Index source_index = *source_ptr;
*output_ptr = source_index * stride + offset;
return true;
},
skip_repeated_elements,
map.index_array().array_ref(),
ArrayView<Index>(
output_indices,
StridedLayoutView<>(
index_transform.input_shape(),
span<const Index>(&output_byte_strides[0], input_rank))));
return absl::OkStatus();
}
Result<Index> ProductOfIndirectExtents(span<const Index> input_shape,
DimensionSet dims) {
Index num_positions = 1;
for (const DimensionIndex dim : dims.index_view()) {
if (internal::MulOverflow(num_positions, input_shape[dim],
&num_positions)) {
return absl::InvalidArgumentError(
"Overflow computing number of positions in domain.");
}
}
return num_positions;
}
Result<std::vector<Index>> GenerateIndexArraySetGridCellIndices(
DimensionSet grid_dims, DimensionSet input_dims,
span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> index_transform, Index num_positions) {
const DimensionIndex num_grid_dims = grid_dims.count();
std::vector<Index> temp_cell_indices(num_grid_dims * num_positions);
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dims.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
const OutputIndexMapRef<> map =
index_transform.output_index_map(output_dim);
Index* cur_cell_indices = temp_cell_indices.data() + grid_i;
if (map.method() == OutputIndexMethod::single_input_dimension) {
TENSORSTORE_RETURN_IF_ERROR(GenerateSingleInputDimensionOutputIndices(
map, input_dims, index_transform, cur_cell_indices, num_grid_dims));
} else {
assert(map.method() == OutputIndexMethod::array);
TENSORSTORE_RETURN_IF_ERROR(GenerateIndexArrayOutputIndices(
map, input_dims, index_transform, cur_cell_indices, num_grid_dims));
}
for (Index* end = cur_cell_indices + num_positions * num_grid_dims;
cur_cell_indices != end; cur_cell_indices += | #include "tensorstore/internal/grid_partition_impl.h"
#include <ostream>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/irregular_grid.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_grid_partition {
std::ostream& operator<<(std::ostream& os,
const IndexTransformGridPartition::StridedSet& s) {
return os << "{grid_dimensions=" << s.grid_dimensions
<< ", input_dimension=" << s.input_dimension << "}";
}
bool operator==(const IndexTransformGridPartition::StridedSet& a,
const IndexTransformGridPartition::StridedSet& b) {
return a.input_dimension == b.input_dimension &&
a.grid_dimensions == b.grid_dimensions;
}
bool operator!=(const IndexTransformGridPartition::StridedSet& a,
const IndexTransformGridPartition::StridedSet& b) {
return !(a == b);
}
std::ostream& operator<<(std::ostream& os,
const IndexTransformGridPartition::IndexArraySet& s) {
return os << "IndexArraySet where:\n"
<< " grid_dimensions=" << s.grid_dimensions << "\n"
<< " input_dimensions=" << s.input_dimensions << "\n"
<< " grid_cell_indices="
<< Array(s.grid_cell_indices.data(),
{s.num_partitions(),
static_cast<Index>(s.grid_dimensions.count())})
<< "\n"
<< " partitioned_input_indices=" << s.partitioned_input_indices
<< "\n"
<< " grid_cell_partition_offsets="
<< span(s.grid_cell_partition_offsets) << "\n";
}
bool operator==(const IndexTransformGridPartition::IndexArraySet& a,
const IndexTransformGridPartition::IndexArraySet& b) {
return a.input_dimensions == b.input_dimensions &&
a.grid_dimensions == b.grid_dimensions &&
a.grid_cell_indices == b.grid_cell_indices &&
a.partitioned_input_indices == b.partitioned_input_indices &&
a.grid_cell_partition_offsets == b.grid_cell_partition_offsets;
}
bool operator!=(const IndexTransformGridPartition::IndexArraySet& a,
const IndexTransformGridPartition::IndexArraySet& b) {
return !(a == b);
}
}
}
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal::IrregularGrid;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
TEST(RegularGridTest, Basic) {
std::vector<Index> grid_cell_shape{1, 2, 3};
RegularGridRef grid{grid_cell_shape};
EXPECT_EQ(3, grid.rank());
IndexInterval grid_cell;
EXPECT_EQ(grid(0, 7, &grid_cell), 7);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(7, 1));
EXPECT_EQ(grid(1, 7, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(6, 2));
EXPECT_EQ(grid(2, 7, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(6, 3));
}
TEST(RegularGridTest, Empty) {
RegularGridRef grid;
EXPECT_EQ(0, grid.rank());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, NoGridDimensions) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
span<const DimensionIndex> grid_output_dimensions;
span<const Index> grid_cell_shape;
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, NoConnectedSets) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_constant(0, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {2};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, StridedSingleSet) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {2};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
0}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
StridedSingleDimensionSets) {
auto transform = tensorstore::IndexTransformBuilder<>(5, 4)
.input_origin({1, 2, 3, 4, 5})
.input_shape({6, 7, 8, 9, 10})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {2, 0};
const Index grid_cell_shape[] = {5, 10};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
4},
IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({1}),
2}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, DiagonalStridedSet) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({1})
.input_shape({6})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
const Index grid_cell_shape[] = {5, 10};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0, 1}),
0}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, DiagonalStridedSets) {
auto transform = tensorstore::IndexTransformBuilder<>(5, 4)
.input_origin({1, 2, 3, 4, 5})
.input_shape({6, 7, 8, 9, 10})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 4)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {2, 0, 1};
const Index grid_cell_shape[] = {5, 10, 15};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0, 2}),
4},
IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({1}),
2}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, SingleIndexArrayDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {4};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0}),
DimensionSet::FromIndices({0}),
{1, 3, 5},
MakeArray<Index>({{0}, {3}, {1}, {2}}),
{0, 1, 2}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
IndexArrayAndStridedDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, 3, 5, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {10, 4};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0}),
{0, 1, 0, 5, 1, 3, 1, 5},
MakeArray<Index>({{0}, {1}, {3}, {2}}),
{0, 1, 2, 3}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
IndexArrayAndStridedDimensionIndependent) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {0}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
const Index grid_cell_shape[] = {3, 1};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({1}),
DimensionSet::FromIndices({0}),
{0},
MakeArray<Index>({{0}, {1}}),
{0}}));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
1}));
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
TwoOutputsTwoDimensionalIndexArrays) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({-1, 2})
.input_shape({2, 3})
.output_index_array(0, 5, 2, MakeArray<Index>({{1, 2, 3}, {3, 4, 5}}))
.output_index_array(1, 2, 1, MakeArray<Index>({{5, 9, 1}, {8, 2, 3}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {3, 5};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0, 1}),
{1, 2, 1, 3, 2, 1, 3, 1, 3, 2},
MakeArray<Index>({{-1, 4}, {0, 3}, {0, 4}, {-1, 2}, {-1, 3}, {0, 2}}),
{0, 2, 3, 4, 5}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, UnboundedDomain) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({-kInfIndex})
.input_shape({100})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {5};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status,
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 0 has unbounded domain .*"));
}
TEST(PrePartitionIndexTransformOverRegularGridTest, IndexArrayOutOfBounds) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({2, 3, 4}),
IndexInterval::Closed(3, 10))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {5};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status,
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 2 is outside valid range \\[3, 11\\)"));
}
TEST(PrePartitionIndexTransformOverRegularGridTest, StridedDimensionOverflow) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, -kInfIndex, -kInfIndex, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {10, 4};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status, MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(PrePartitionIndexTransformOverGridTest, SingleIndexArrayDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
std::vector<Index> dimension0{-1, 5, 10};
IrregularGrid grid{{dimension0}};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0}),
DimensionSet::FromIndices({0}),
{1, 2},
MakeArray<Index>({{0}, {1}, {2}, {3}}),
{0, 1}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverGridTest, IndexArrayAndStridedDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, 3, 5, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
std::vector<Index> dimension0{-1, 6, 9, 15};
std::vector<Index> dimension1{10, 11, 15, 22};
IrregularGrid grid({dimension0, dimension1});
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0}),
{0, -1, 1, 3, 2, 2, 3, 1},
MakeArray<Index>({{0}, {1}, {2}, {3}}),
{0, 1, 2, 3}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverGridTest,
IndexArrayAndStridedDimensionIndependent) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {0}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
std::vector<Index> dimension0{1, 2, 3, 7};
std::vector<Index> dimension1{-1, 0, 2, 4, 5};
IrregularGrid grid({dimension0, dimension1});
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({1}),
DimensionSet::FromIndices({0}),
{1},
MakeArray<Index>({{0}, {1}}),
{0}}));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
1}));
}
TEST(PrePartitionIndexTransformOverGridTest,
TwoOutputsTwoDimensionalIndexArrays) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({-1, 2})
.input_shape({2, 3})
.output_index_array(0, 5, 2, MakeArray<Index>({{1, 2, 3}, {3, 4, 5}}))
.output_index_array(1, 2, 1, MakeArray<Index>({{5, 9, 1}, {8, 2, 3}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
std::vector<Index> dimension0{1, 2, 3, 7, 10};
std::vector<Index> dimension1{-1, 0, 2, 4, 5, 8};
IrregularGrid grid{{dimension0, dimension1}};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0, 1}),
{2, 5, 3, 4, 4, 5},
MakeArray<Index>({{-1, 4}, {0, 3}, {0, 4}, {-1, 2}, {-1, 3}, {0, 2}}),
{0, 3, 4}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
} |
649 | cpp | google/tensorstore | grid_chunk_key_ranges_base10 | tensorstore/internal/grid_chunk_key_ranges_base10.cc | tensorstore/internal/grid_chunk_key_ranges_base10_test.cc | #ifndef TENSORSTORE_INTERNAL_GRID_CHUNK_KEY_RANGES_BASE10_H_
#define TENSORSTORE_INTERNAL_GRID_CHUNK_KEY_RANGES_BASE10_H_
#include <string>
#include <string_view>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
class Base10LexicographicalGridIndexKeyParser
: public LexicographicalGridIndexKeyParser {
public:
explicit Base10LexicographicalGridIndexKeyParser(DimensionIndex rank,
char dimension_separator)
: rank(rank), dimension_separator(dimension_separator) {}
std::string FormatKey(span<const Index> grid_indices) const final;
bool ParseKey(std::string_view key, span<Index> grid_indices) const final;
Index MinGridIndexForLexicographicalOrder(
DimensionIndex dim, IndexInterval grid_interval) const final;
DimensionIndex rank;
char dimension_separator;
};
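// Illustrative usage, mirroring the tests in
// grid_chunk_key_ranges_base10_test.cc: with rank 2 and '/' as the separator,
// grid cell {2, 3} round-trips through the key "2/3":
//
//   Base10LexicographicalGridIndexKeyParser parser(2, '/');
//   std::string key = parser.FormatKey({{2, 3}});  // "2/3"
//   Index indices[2];
//   bool ok = parser.ParseKey(key, indices);       // ok == true, indices == {2, 3}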
Index MinValueWithMaxBase10Digits(Index exclusive_max);
}
}
#endif
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include <string>
#include <string_view>
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
std::string Base10LexicographicalGridIndexKeyParser::FormatKey(
span<const Index> grid_indices) const {
if (rank == 0) return "0";
std::string key;
FormatGridIndexKeyWithDimensionSeparator(
key, dimension_separator,
[](std::string& out, DimensionIndex dim, Index grid_index) {
absl::StrAppend(&out, grid_index);
},
rank, grid_indices);
return key;
}
bool Base10LexicographicalGridIndexKeyParser::ParseKey(
std::string_view key, span<Index> grid_indices) const {
return ParseGridIndexKeyWithDimensionSeparator(
dimension_separator,
[](std::string_view part, DimensionIndex dim, Index& grid_index) {
if (part.empty() || !absl::ascii_isdigit(part.front()) ||
!absl::ascii_isdigit(part.back()) ||
!absl::SimpleAtoi(part, &grid_index)) {
return false;
}
return true;
},
key, grid_indices);
}
Index Base10LexicographicalGridIndexKeyParser::
MinGridIndexForLexicographicalOrder(DimensionIndex dim,
IndexInterval grid_interval) const {
return MinValueWithMaxBase10Digits(grid_interval.exclusive_max());
}
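// Returns the smallest non-negative index with the same number of base-10
// digits as the largest index in [0, exclusive_max); within
// [min_value, exclusive_max) the lexicographic order of the decimal
// representations matches numeric order.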
Index MinValueWithMaxBase10Digits(Index exclusive_max) {
if (exclusive_max <= 10) {
return 0;
}
Index min_value = 10;
while (min_value * 10 < exclusive_max) {
min_value *= 10;
}
return min_value;
}
}
} | #include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser;
using ::tensorstore::internal::MinValueWithMaxBase10Digits;
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank0) {
Base10LexicographicalGridIndexKeyParser parser(0,
'/');
EXPECT_THAT(parser.FormatKey({}), "0");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank1) {
Base10LexicographicalGridIndexKeyParser parser(1,
'/');
EXPECT_THAT(parser.FormatKey({{2}}), "2");
EXPECT_THAT(parser.FormatKey({}), "");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank2) {
Base10LexicographicalGridIndexKeyParser parser(2,
'/');
EXPECT_THAT(parser.FormatKey({{2, 3}}), "2/3");
EXPECT_THAT(parser.FormatKey({{2}}), "2/");
EXPECT_THAT(parser.FormatKey({}), "");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank1) {
Base10LexicographicalGridIndexKeyParser parser(1,
'/');
Index indices[1];
EXPECT_TRUE(parser.ParseKey("2", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(2));
EXPECT_FALSE(parser.ParseKey("", indices));
EXPECT_FALSE(parser.ParseKey("-1", indices));
EXPECT_FALSE(parser.ParseKey("a", indices));
EXPECT_FALSE(parser.ParseKey("2/3", indices));
EXPECT_FALSE(parser.ParseKey("2/", indices));
}
TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank2) {
Base10LexicographicalGridIndexKeyParser parser(2,
'/');
Index indices[2];
EXPECT_TRUE(parser.ParseKey("2/3", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(2, 3));
EXPECT_TRUE(parser.ParseKey("212/335", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(212, 335));
EXPECT_FALSE(parser.ParseKey("1", indices));
EXPECT_FALSE(parser.ParseKey("", indices));
EXPECT_FALSE(parser.ParseKey("1/2/3", indices));
EXPECT_FALSE(parser.ParseKey("1/2/", indices));
}
TEST(Base10LexicographicalGridIndexKeyParserTest,
MinGridIndexForLexicographicalOrder) {
Base10LexicographicalGridIndexKeyParser parser(2,
'/');
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 9)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 10)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 11)),
10);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 100)),
10);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 101)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 999)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1000)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1001)),
1000);
}
TEST(MinValueWithMaxBase10DigitsTest, Basic) {
EXPECT_EQ(0, MinValueWithMaxBase10Digits(0));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(1));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(9));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(10));
EXPECT_EQ(10, MinValueWithMaxBase10Digits(11));
EXPECT_EQ(10, MinValueWithMaxBase10Digits(100));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(101));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(999));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(1000));
EXPECT_EQ(1000, MinValueWithMaxBase10Digits(1001));
}
} |
650 | cpp | google/tensorstore | ref_counted_string | tensorstore/internal/ref_counted_string.cc | tensorstore/internal/ref_counted_string_test.cc | #ifndef TENSORSTORE_INTERNAL_REF_COUNTED_STRING_H_
#define TENSORSTORE_INTERNAL_REF_COUNTED_STRING_H_
#include <assert.h>
#include <stddef.h>
#include <atomic>
#include <new>
#include <string_view>
#include <utility>
namespace tensorstore {
namespace internal {
class RefCountedString {
public:
RefCountedString() : data_(nullptr) {}
RefCountedString(std::string_view s) : data_(AllocateCopy(s)) {}
RefCountedString(const char* s) : RefCountedString(std::string_view(s)) {}
RefCountedString(const RefCountedString& other) noexcept
: data_(other.data_) {
if (data_) {
header().IncrementReferenceCount();
}
}
RefCountedString(RefCountedString&& other) noexcept : data_(other.data_) {
other.data_ = nullptr;
}
RefCountedString& operator=(const RefCountedString& other) noexcept;
RefCountedString& operator=(RefCountedString&& other) noexcept {
auto temp = other.data_;
other.data_ = data_;
data_ = temp;
return *this;
}
RefCountedString& operator=(std::string_view s);
RefCountedString& operator=(const char* s);
~RefCountedString() {
if (!data_) return;
header().DecrementReferenceCount();
}
bool empty() const { return data_ == nullptr; }
const char* data() const { return data_; }
size_t size() const { return data_ ? header().length : 0; }
char operator[](size_t i) const {
assert(i <= size());
return data_[i];
}
const char* begin() const { return data_; }
const char* end() const { return data_ + size(); }
operator std::string_view() const { return std::string_view(data(), size()); }
template <typename Sink>
friend void AbslStringify(Sink&& sink, const RefCountedString& x) {
sink.Append(std::string_view(x));
}
friend bool operator==(const RefCountedString& a, const RefCountedString& b) {
return a.data_ == b.data_ || std::string_view(a) == std::string_view(b);
}
friend bool operator<(const RefCountedString& a, const RefCountedString& b) {
return std::string_view(a) < std::string_view(b);
}
friend bool operator<=(const RefCountedString& a, const RefCountedString& b) {
return std::string_view(a) <= std::string_view(b);
}
friend bool operator>(const RefCountedString& a, const RefCountedString& b) {
return std::string_view(a) > std::string_view(b);
}
friend bool operator>=(const RefCountedString& a, const RefCountedString& b) {
return std::string_view(a) >= std::string_view(b);
}
friend bool operator!=(const RefCountedString& a, const RefCountedString& b) {
return !(a == b);
}
friend bool operator==(std::string_view a, const RefCountedString& b) {
return a == std::string_view(b);
}
friend bool operator<(std::string_view a, const RefCountedString& b) {
return a < std::string_view(b);
}
friend bool operator<=(std::string_view a, const RefCountedString& b) {
return a <= std::string_view(b);
}
friend bool operator>(std::string_view a, const RefCountedString& b) {
return a > std::string_view(b);
}
friend bool operator>=(std::string_view a, const RefCountedString& b) {
return a >= std::string_view(b);
}
friend bool operator!=(std::string_view a, const RefCountedString& b) {
return a != std::string_view(b);
}
friend bool operator==(const char* a, const RefCountedString& b) {
return a == std::string_view(b);
}
friend bool operator<(const char* a, const RefCountedString& b) {
return a < std::string_view(b);
}
friend bool operator<=(const char* a, const RefCountedString& b) {
return a <= std::string_view(b);
}
friend bool operator>(const char* a, const RefCountedString& b) {
return a > std::string_view(b);
}
friend bool operator>=(const char* a, const RefCountedString& b) {
return a >= std::string_view(b);
}
friend bool operator!=(const char* a, const RefCountedString& b) {
return a != std::string_view(b);
}
friend bool operator==(const RefCountedString& a, std::string_view b) {
return std::string_view(a) == b;
}
friend bool operator<(const RefCountedString& a, std::string_view b) {
return std::string_view(a) < b;
}
friend bool operator<=(const RefCountedString& a, std::string_view b) {
return std::string_view(a) <= b;
}
friend bool operator>(const RefCountedString& a, std::string_view b) {
return std::string_view(a) > b;
}
friend bool operator>=(const RefCountedString& a, std::string_view b) {
return std::string_view(a) >= b;
}
friend bool operator!=(const RefCountedString& a, std::string_view b) {
return std::string_view(a) != b;
}
friend bool operator==(const RefCountedString& a, const char* b) {
return std::string_view(a) == b;
}
friend bool operator<(const RefCountedString& a, const char* b) {
return std::string_view(a) < b;
}
friend bool operator<=(const RefCountedString& a, const char* b) {
return std::string_view(a) <= b;
}
friend bool operator>(const RefCountedString& a, const char* b) {
return std::string_view(a) > b;
}
friend bool operator>=(const RefCountedString& a, const char* b) {
return std::string_view(a) >= b;
}
friend bool operator!=(const RefCountedString& a, const char* b) {
return std::string_view(a) != b;
}
template <typename H>
friend H AbslHashValue(H h, const RefCountedString& s) {
return H::combine_contiguous(std::move(h), s.data_, s.size());
}
private:
friend class RefCountedStringWriter;
struct Header {
size_t length;
mutable std::atomic<size_t> ref_count{1};
void IncrementReferenceCount() const {
ref_count.fetch_add(1, std::memory_order_relaxed);
}
void DecrementReferenceCount() const {
if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
Deallocate();
}
}
void Deallocate() const;
};
static char* Allocate(size_t size);
static const char* AllocateCopy(std::string_view s);
const Header& header() const {
return reinterpret_cast<const Header*>(data_)[-1];
}
const char* data_;
};
class RefCountedStringWriter {
public:
RefCountedStringWriter() = default;
explicit RefCountedStringWriter(size_t size) {
string_.data_ = RefCountedString::Allocate(size);
}
RefCountedStringWriter(RefCountedStringWriter&& other) = default;
RefCountedStringWriter(const RefCountedStringWriter& other) = delete;
char* data() { return const_cast<char*>(string_.data()); }
size_t size() const { return string_.size(); }
operator RefCountedString() && { return std::move(string_); }
private:
RefCountedString string_;
};
template <typename T, typename SFINAE>
struct HeapUsageEstimator;
template <>
struct HeapUsageEstimator<RefCountedString, void> {
static size_t EstimateHeapUsage(const RefCountedString& x, size_t max_depth) {
return x.size();
}
};
}
}
#endif
#include "tensorstore/internal/ref_counted_string.h"
#include <cstring>
#include <new>
namespace tensorstore {
namespace internal {
RefCountedString& RefCountedString::operator=(
const RefCountedString& other) noexcept {
if (other.data_) other.header().IncrementReferenceCount();
if (data_) header().DecrementReferenceCount();
data_ = other.data_;
return *this;
}
RefCountedString& RefCountedString::operator=(std::string_view s) {
auto* data = AllocateCopy(s);
if (data_) header().DecrementReferenceCount();
data_ = data;
return *this;
}
RefCountedString& RefCountedString::operator=(const char* s) {
return *this = std::string_view(s);
}
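// Allocates storage for a string of `size` characters with its Header
// (length + reference count) placed immediately before the character data;
// returns a pointer to the data portion.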
char* RefCountedString::Allocate(size_t size) {
if (size == 0) return nullptr;
void* ptr = ::operator new(size + sizeof(Header));
new (ptr) Header{size};
return static_cast<char*>(ptr) + sizeof(Header);
}
const char* RefCountedString::AllocateCopy(std::string_view s) {
if (s.empty()) return nullptr;
char* data = Allocate(s.size());
std::memcpy(data, s.data(), s.size());
return data;
}
void RefCountedString::Header::Deallocate() const {
::operator delete(const_cast<Header*>(this), length + sizeof(Header));
}
}
} | #include "tensorstore/internal/ref_counted_string.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::RefCountedString;
using ::tensorstore::internal::RefCountedStringWriter;
TEST(RefCountedStringTest, DefaultConstruct) {
RefCountedString s;
EXPECT_EQ("", std::string_view(s));
EXPECT_EQ("", std::string(s));
EXPECT_TRUE(s.empty());
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0, s.size());
EXPECT_EQ(nullptr, s.begin());
EXPECT_EQ(nullptr, s.end());
EXPECT_EQ(s, s);
auto other = s;
EXPECT_EQ(nullptr, other.data());
}
TEST(RefCountedStringTest, EmptyStringConstruct) {
RefCountedString s("");
EXPECT_EQ("", std::string_view(s));
EXPECT_EQ("", std::string(s));
EXPECT_TRUE(s.empty());
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0, s.size());
EXPECT_EQ(nullptr, s.begin());
EXPECT_EQ(nullptr, s.end());
EXPECT_EQ(s, s);
}
TEST(RefCountedStringTest, NonEmptyStringConstruct) {
RefCountedString s("abc");
EXPECT_EQ("abc", std::string_view(s));
EXPECT_EQ("abc", std::string(s));
EXPECT_FALSE(s.empty());
EXPECT_EQ(3, s.size());
EXPECT_EQ("abc", s);
EXPECT_NE("abd", s);
EXPECT_EQ(s, "abc");
EXPECT_LT("ab", s);
EXPECT_LE("abc", s);
EXPECT_GT("abd", s);
}
TEST(RefCountedStringTest, Copy) {
RefCountedString x("abc");
RefCountedString y = x;
EXPECT_EQ(x.data(), y.data());
}
TEST(RefCountedStringTest, Move) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, EmptyMoveAssignNonEmpty) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y;
y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, EmptyMoveAssignEmpty) {
RefCountedString x;
RefCountedString y;
y = std::move(x);
EXPECT_TRUE(y.empty());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignNonEmpty) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y("def");
y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignEmpty) {
RefCountedString x;
RefCountedString y("def");
y = std::move(x);
EXPECT_TRUE(y.empty());
}
TEST(RefCountedStringTest, NonEmptyCopyAssignNonEmpty) {
RefCountedString x("abc");
RefCountedString y("def");
y = x;
EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignNonEmpty) {
RefCountedString x("abc");
RefCountedString y;
y = x;
EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, NonEmptyCopyAssignEmpty) {
RefCountedString x;
RefCountedString y("def");
y = x;
EXPECT_EQ("", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignEmpty) {
RefCountedString x;
RefCountedString y;
y = x;
EXPECT_EQ("", y);
}
TEST(RefCountedStringTest, NonEmptyAssignFromStringView) {
RefCountedString x("def");
x = std::string_view("abc");
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromStringView) {
RefCountedString x;
x = std::string_view("abc");
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, NonEmptyAssignFromCStr) {
RefCountedString x("def");
x = "abc";
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromCStr) {
RefCountedString x;
x = "abc";
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, SelfAssign) {
RefCountedString x("abc");
x = x;
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, SelfAssignStringView) {
RefCountedString x("abc");
x = std::string_view(x);
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, Comparison) {
RefCountedString a("abc");
RefCountedString a1("abc");
std::string_view a_sv = "abc";
const char* a_cstr = "abc";
RefCountedString b("def");
std::string_view b_sv = "def";
const char* b_cstr = "def";
EXPECT_TRUE(a == a);
EXPECT_TRUE(a == a1);
EXPECT_TRUE(a == a_sv);
EXPECT_TRUE(a == a_cstr);
EXPECT_TRUE(a_sv == a);
EXPECT_TRUE(a_cstr == a);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a != a1);
EXPECT_FALSE(a != a_sv);
EXPECT_FALSE(a != a_cstr);
EXPECT_FALSE(a_sv != a);
EXPECT_FALSE(a_cstr != a);
EXPECT_TRUE(a <= a);
EXPECT_TRUE(a <= a_sv);
EXPECT_TRUE(a <= a_cstr);
EXPECT_TRUE(a_sv <= a);
EXPECT_TRUE(a_cstr <= a);
EXPECT_TRUE(a <= a1);
EXPECT_TRUE(a >= a);
EXPECT_TRUE(a >= a_sv);
EXPECT_TRUE(a >= a_cstr);
EXPECT_TRUE(a_sv >= a);
EXPECT_TRUE(a_cstr >= a);
EXPECT_TRUE(a >= a1);
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= b_sv);
EXPECT_TRUE(a <= b_cstr);
EXPECT_TRUE(a_sv <= b);
EXPECT_TRUE(a_cstr <= b);
EXPECT_TRUE(a <= b_sv);
EXPECT_TRUE(a <= b_cstr);
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < b_sv);
EXPECT_TRUE(a < b_cstr);
EXPECT_TRUE(a_sv < b);
EXPECT_TRUE(a_cstr < b);
EXPECT_FALSE(a > b);
EXPECT_FALSE(a_sv > b);
EXPECT_FALSE(a_cstr > b);
EXPECT_FALSE(a > b_sv);
EXPECT_FALSE(a > b_cstr);
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= b_sv);
EXPECT_FALSE(a >= b_cstr);
EXPECT_FALSE(a_sv >= b);
EXPECT_FALSE(a_cstr >= b);
}
TEST(RefCountedStringTest, StdStringConversion) {
std::string s = static_cast<std::string>(RefCountedString("abc"));
EXPECT_EQ("abc", s);
}
TEST(RefCountedStringTest, Indexing) {
RefCountedString x = "abc";
EXPECT_EQ('a', x[0]);
EXPECT_EQ('c', x[2]);
}
TEST(RefCountedStringTest, Writer) {
RefCountedStringWriter writer(3);
memcpy(writer.data(), "abc", 3);
RefCountedString s = std::move(writer);
EXPECT_EQ("abc", s);
}
} |
651 | cpp | google/tensorstore | storage_statistics | tensorstore/internal/storage_statistics.cc | tensorstore/driver/neuroglancer_precomputed/storage_statistics_test.cc | #ifndef TENSORSTORE_INTERNAL_STORAGE_STATISTICS_H_
#define TENSORSTORE_INTERNAL_STORAGE_STATISTICS_H_
#include <atomic>
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal {
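// Shared state for an in-flight GetStorageStatistics request; the aggregated
// result is written to the promise when the last reference is released, i.e.
// once all chunk queries have completed or the query has stopped early.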
struct GetStorageStatisticsAsyncOperationState
: public internal::AtomicReferenceCount<
GetStorageStatisticsAsyncOperationState> {
explicit GetStorageStatisticsAsyncOperationState(
Future<ArrayStorageStatistics>& future,
const GetArrayStorageStatisticsOptions& options);
std::atomic<int64_t> chunks_present{0};
std::atomic<int64_t> total_chunks = 0;
GetArrayStorageStatisticsOptions options;
Promise<ArrayStorageStatistics> promise;
std::atomic<bool> chunk_missing{false};
void MaybeStopEarly();
void IncrementChunksPresent() {
if (++chunks_present == 1) {
MaybeStopEarly();
}
}
void ChunkMissing() {
if (chunk_missing.exchange(true) == false) {
MaybeStopEarly();
}
}
void SetError(absl::Status error) {
SetDeferredResult(promise, std::move(error));
}
virtual ~GetStorageStatisticsAsyncOperationState();
};
}
}
#endif
#include "tensorstore/internal/storage_statistics.h"
#include <stdint.h>
#include <atomic>
#include <utility>
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal {
GetStorageStatisticsAsyncOperationState::
GetStorageStatisticsAsyncOperationState(
Future<ArrayStorageStatistics>& future,
const GetArrayStorageStatisticsOptions& options)
: options(options) {
auto p = PromiseFuturePair<ArrayStorageStatistics>::Make(std::in_place);
this->promise = std::move(p.promise);
future = std::move(p.future);
}
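// Resolves the promise as soon as the outcome of every requested statistic is
// already determined: query_not_stored is settled once any chunk is found, and
// query_fully_stored once any chunk is known to be missing.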
void GetStorageStatisticsAsyncOperationState::MaybeStopEarly() {
if (options.mask & ArrayStorageStatistics::query_not_stored) {
if (chunks_present.load() == 0) {
return;
}
}
if (options.mask & ArrayStorageStatistics::query_fully_stored) {
if (chunk_missing.load() == false) {
return;
}
}
SetDeferredResult(promise, ArrayStorageStatistics{});
}
GetStorageStatisticsAsyncOperationState::
~GetStorageStatisticsAsyncOperationState() {
auto& r = promise.raw_result();
if (!r.ok()) return;
r->mask = options.mask;
int64_t num_present = chunks_present.load(std::memory_order_relaxed);
if (options.mask & ArrayStorageStatistics::query_not_stored) {
r->not_stored = (num_present == 0);
}
if (options.mask & ArrayStorageStatistics::query_fully_stored) {
r->fully_stored = num_present == total_chunks;
}
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/open.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::ArrayStorageStatistics;
using ::tensorstore::ChunkLayout;
using ::tensorstore::Context;
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Schema;
class StorageStatisticsTest : public ::testing::Test {
protected:
Context context = Context::Default();
tensorstore::internal::MockKeyValueStore::MockPtr mock_kvstore =
*context.GetResource<tensorstore::internal::MockKeyValueStoreResource>()
.value();
tensorstore::kvstore::DriverPtr memory_store =
tensorstore::GetMemoryKeyValueStore();
public:
StorageStatisticsTest() {
mock_kvstore->forward_to = memory_store;
mock_kvstore->log_requests = true;
}
};
TEST_F(StorageStatisticsTest, FullyLexicographicOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open(
{
{"driver", "neuroglancer_precomputed"},
{"kvstore", {{"driver", "mock_key_value_store"}}},
},
Schema::Shape({100, 200, 300, 1}), dtype_v<uint8_t>,
ChunkLayout::ReadChunkShape({10, 20, 30, 1}), context,
tensorstore::OpenMode::create)
.result());
mock_kvstore->request_log.pop_all();
{
auto transformed = store | tensorstore::AllDims().HalfOpenInterval(
{1, 1, 1, 0}, {20, 5, 5, 1});
EXPECT_THAT(tensorstore::GetStorageStatistics(
transformed, ArrayStorageStatistics::query_not_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored,
true}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "read"},
{"key", "1_1_1/0-10_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
MatchesJson({{"type", "read"},
{"key", "1_1_1/10-20_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
}));
TENSORSTORE_ASSERT_OK(
tensorstore::Write(tensorstore::MakeScalarArray<uint8_t>(42),
transformed)
.result());
mock_kvstore->request_log.pop_all();
EXPECT_THAT(tensorstore::GetStorageStatistics(
transformed, ArrayStorageStatistics::query_not_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored,
false}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "read"},
{"key", "1_1_1/0-10_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
MatchesJson({{"type", "read"},
{"key", "1_1_1/10-20_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
}));
EXPECT_THAT(tensorstore::GetStorageStatistics(
transformed, ArrayStorageStatistics::query_not_stored,
ArrayStorageStatistics::query_fully_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored |
ArrayStorageStatistics::query_fully_stored,
false, true}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "read"},
{"key", "1_1_1/0-10_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
MatchesJson({{"type", "read"},
{"key", "1_1_1/10-20_0-20_0-30"},
{"byte_range_exclusive_max", 0}}),
}));
}
{
EXPECT_THAT(tensorstore::GetStorageStatistics(
store, ArrayStorageStatistics::query_not_stored,
ArrayStorageStatistics::query_fully_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored |
ArrayStorageStatistics::query_fully_stored,
false, false}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "list"},
{"range", {"1_1_1/", "1_1_10"}},
{"strip_prefix_length", 6}}),
}));
}
{
EXPECT_THAT(tensorstore::GetStorageStatistics(
store | tensorstore::Dims(0).HalfOpenInterval(12, 15),
ArrayStorageStatistics::query_not_stored,
ArrayStorageStatistics::query_fully_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored |
ArrayStorageStatistics::query_fully_stored,
false, false}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::ElementsAre(
MatchesJson({{"type", "list"},
{"range", {"1_1_1/10-20_", "1_1_1/10-20`"}},
{"strip_prefix_length", 6}})));
}
{
EXPECT_THAT(tensorstore::GetStorageStatistics(
store | tensorstore::AllDims().IndexSlice({10, 25, 35, 0}),
ArrayStorageStatistics::query_not_stored,
ArrayStorageStatistics::query_fully_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored |
ArrayStorageStatistics::query_fully_stored,
true, false}));
EXPECT_THAT(
mock_kvstore->request_log.pop_all(),
::testing::ElementsAre(MatchesJson({{"type", "read"},
{"key", "1_1_1/10-20_20-40_30-60"},
{"byte_range_exclusive_max", 0}})));
}
{
EXPECT_THAT(tensorstore::GetStorageStatistics(
store | tensorstore::AllDims().IndexSlice({2, 2, 2, 0}),
ArrayStorageStatistics::query_not_stored,
ArrayStorageStatistics::query_fully_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored |
ArrayStorageStatistics::query_fully_stored,
false, true}));
EXPECT_THAT(
mock_kvstore->request_log.pop_all(),
::testing::ElementsAre(MatchesJson({{"type", "read"},
{"key", "1_1_1/0-10_0-20_0-30"},
{"byte_range_exclusive_max", 0}})));
}
}
TEST_F(StorageStatisticsTest, SemiLexicographicOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open(
{
{"driver", "neuroglancer_precomputed"},
{"kvstore", {{"driver", "mock_key_value_store"}}},
},
Schema::Shape({100, 100, 100, 1}), dtype_v<uint8_t>,
ChunkLayout::ReadChunkShape({1, 1, 1, 1}), context,
tensorstore::OpenMode::create)
.result());
mock_kvstore->request_log.pop_all();
EXPECT_THAT(tensorstore::GetStorageStatistics(
store | tensorstore::Dims(0).HalfOpenInterval(8, 15),
ArrayStorageStatistics::query_not_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored,
true}));
EXPECT_THAT(mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "list"},
{"range", {"1_1_1/8-9_", "1_1_1/8-9`"}},
{"strip_prefix_length", 6}}),
MatchesJson({{"type", "list"},
{"range", {"1_1_1/9-10_", "1_1_1/9-10`"}},
{"strip_prefix_length", 6}}),
MatchesJson({{"type", "list"},
{"range", {"1_1_1/10-11_", "1_1_1/14-15`"}},
{"strip_prefix_length", 6}}),
}));
EXPECT_THAT(
tensorstore::GetStorageStatistics(
store | tensorstore::Dims(0, 1).HalfOpenInterval({3, 8}, {4, 15}),
ArrayStorageStatistics::query_not_stored)
.result(),
::testing::Optional(ArrayStorageStatistics{
ArrayStorageStatistics::query_not_stored,
true}));
EXPECT_THAT(
mock_kvstore->request_log.pop_all(),
::testing::UnorderedElementsAreArray({
MatchesJson({{"type", "list"},
{"range", {"1_1_1/3-4_8-9_", "1_1_1/3-4_8-9`"}},
{"strip_prefix_length", 6}}),
MatchesJson({{"type", "list"},
{"range", {"1_1_1/3-4_9-10_", "1_1_1/3-4_9-10`"}},
{"strip_prefix_length", 6}}),
MatchesJson({{"type", "list"},
{"range", {"1_1_1/3-4_10-11_", "1_1_1/3-4_14-15`"}},
{"strip_prefix_length", 6}}),
}));
}
TEST_F(StorageStatisticsTest, Sharded) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open(
{
{"driver", "neuroglancer_precomputed"},
{"kvstore", {{"driver", "mock_key_value_store"}}},
},
Schema::Shape({100, 100, 100, 1}), dtype_v<uint8_t>,
ChunkLayout::ReadChunkShape({1, 1, 1, 1}),
ChunkLayout::WriteChunkShape({8, 8, 8, 1}), context,
tensorstore::OpenMode::create)
.result());
mock_kvstore->request_log.pop_all();
EXPECT_THAT(tensorstore::GetStorageStatistics(
store | tensorstore::Dims(0).HalfOpenInterval(8, 15),
ArrayStorageStatistics::query_not_stored)
.result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
} |
652 | cpp | google/tensorstore | path | tensorstore/internal/path.cc | tensorstore/internal/path_test.cc | #ifndef TENSORSTORE_INTERNAL_PATH_H_
#define TENSORSTORE_INTERNAL_PATH_H_
#include <initializer_list>
#include <string>
#include <string_view>
namespace tensorstore {
namespace internal_path {
std::string JoinPathImpl(std::initializer_list<std::string_view> paths);
}
namespace internal {
template <typename... T>
std::string JoinPath(const T&... args) {
return internal_path::JoinPathImpl({args...});
}
std::pair<std::string_view, std::string_view> PathDirnameBasename(
std::string_view path);
void AppendPathComponent(std::string& path, std::string_view component);
void EnsureDirectoryPath(std::string& path);
void EnsureNonDirectoryPath(std::string& path);
}
}
#endif
#include "tensorstore/internal/path.h"
#include <initializer_list>
#include <string>
#include <string_view>
#include "absl/strings/str_cat.h"
namespace {
#ifdef _WIN32
constexpr inline bool IsDirSeparator(char c) { return c == '\\' || c == '/'; }
#else
constexpr inline bool IsDirSeparator(char c) { return c == '/'; }
#endif
}
namespace tensorstore {
namespace internal_path {
std::string JoinPathImpl(std::initializer_list<std::string_view> paths) {
size_t s = 0;
for (std::string_view path : paths) {
s += path.size() + 1;
}
std::string result;
result.reserve(s);
for (std::string_view path : paths) {
internal::AppendPathComponent(result, path);
}
return result;
}
}
namespace internal {
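// Splits `path` into a (dirname, basename) pair.  Separators immediately
// preceding the basename are dropped from the dirname, except that a leading
// "/" is preserved; a path without separators yields an empty dirname.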
std::pair<std::string_view, std::string_view> PathDirnameBasename(
std::string_view path) {
size_t pos = path.size();
while (pos != 0 && !IsDirSeparator(path[pos - 1])) {
--pos;
}
size_t basename = pos;
--pos;
if (pos == std::string_view::npos) {
return {"", path};
}
while (pos != 0 && IsDirSeparator(path[pos - 1])) {
--pos;
}
if (pos == 0) {
return {"/", path.substr(basename)};
}
return {path.substr(0, pos), path.substr(basename)};
}
void EnsureDirectoryPath(std::string& path) {
if (path.size() == 1 && path[0] == '/') {
path.clear();
} else if (!path.empty() && path.back() != '/') {
path += '/';
}
}
void EnsureNonDirectoryPath(std::string& path) {
size_t size = path.size();
while (size > 0 && path[size - 1] == '/') {
--size;
}
path.resize(size);
}
void AppendPathComponent(std::string& path, std::string_view component) {
if (!path.empty() && path.back() != '/' && !component.empty() &&
component.front() != '/') {
absl::StrAppend(&path, "/", component);
} else {
path += component;
}
}
}
} | #include "tensorstore/internal/path.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::EnsureDirectoryPath;
using ::tensorstore::internal::EnsureNonDirectoryPath;
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::PathDirnameBasename;
TEST(PathTest, JoinPath) {
EXPECT_EQ("/foo/bar", JoinPath("/foo", "bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo/", "bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo", "/bar"));
EXPECT_EQ("/foo
EXPECT_EQ("foo/bar", JoinPath("foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "/bar"));
EXPECT_EQ("/bar", JoinPath("", "/bar"));
EXPECT_EQ("bar", JoinPath("", "bar"));
EXPECT_EQ("/foo", JoinPath("/foo", ""));
EXPECT_EQ("/foo/bar/baz
JoinPath("/foo/bar/baz/", "/blah/blink/biz"));
EXPECT_EQ("/foo/bar/baz/blah", JoinPath("/foo", "bar", "baz", "blah"));
EXPECT_EQ("http:
}
TEST(PathTest, JoinPath_MixedArgs) {
constexpr const char kFoo[] = "/foo";
std::string_view foo_view("/foo");
std::string foo("/foo");
EXPECT_EQ("/foo/bar", JoinPath(foo_view, "bar"));
EXPECT_EQ("/foo/bar", JoinPath(foo, "bar"));
EXPECT_EQ("/foo/bar", JoinPath(kFoo, "/bar"));
}
TEST(PathTest, PathDirnameBasename) {
EXPECT_EQ("/a/b", PathDirnameBasename("/a/b/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("/a/b/bar").second);
EXPECT_EQ("a/b", PathDirnameBasename("a/b/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("a/b/bar").second);
EXPECT_EQ("", PathDirnameBasename("bar").first);
EXPECT_EQ("bar", PathDirnameBasename("bar").second);
EXPECT_EQ("/", PathDirnameBasename("/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("/bar").second);
EXPECT_EQ("
EXPECT_EQ("bar", PathDirnameBasename("
EXPECT_EQ("/", PathDirnameBasename("
EXPECT_EQ("bar", PathDirnameBasename("
}
TEST(EnsureDirectoryPathTest, EmptyString) {
std::string path = "";
EnsureDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureDirectoryPathTest, SingleSlash) {
std::string path = "/";
EnsureDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureDirectoryPathTest, NonEmptyWithoutSlash) {
std::string path = "abc";
EnsureDirectoryPath(path);
EXPECT_EQ("abc/", path);
}
TEST(EnsureDirectoryPathTest, NonEmptyWithSlash) {
std::string path = "abc/";
EnsureDirectoryPath(path);
EXPECT_EQ("abc/", path);
}
TEST(EnsureNonDirectoryPathTest, EmptyString) {
std::string path = "";
EnsureNonDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureNonDirectoryPathTest, SingleSlash) {
std::string path = "/";
EnsureNonDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithoutSlash) {
std::string path = "abc";
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithSlash) {
std::string path = "abc/";
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithSlashes) {
std::string path = "abc
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
} |
653 | cpp | google/tensorstore | uri_utils | tensorstore/internal/uri_utils.cc | tensorstore/internal/uri_utils_test.cc | #ifndef TENSORSTORE_INTERNAL_URI_UTILS_H_
#define TENSORSTORE_INTERNAL_URI_UTILS_H_
#include <cstdint>
#include <string>
#include <string_view>
namespace tensorstore {
namespace internal {
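// Fixed 128-bit bitmap over the ASCII range, used below to define the sets of
// characters that do not require percent encoding.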
class AsciiSet {
public:
constexpr AsciiSet() : bitvec_{0, 0} {}
constexpr AsciiSet(std::string_view s) : bitvec_{0, 0} {
for (char c : s) {
Set(c);
}
}
constexpr void Set(char c) {
auto uc = static_cast<unsigned char>(c);
bitvec_[(uc & 64) ? 1 : 0] |= static_cast<uint64_t>(1) << (uc & 63);
}
constexpr bool Test(char c) const {
auto uc = static_cast<unsigned char>(c);
if (uc >= 128) return false;
return (bitvec_[(uc & 64) ? 1 : 0] >> (uc & 63)) & 1;
}
private:
uint64_t bitvec_[2];
};
static inline constexpr AsciiSet kUriUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'()"};
static inline constexpr AsciiSet kUriPathUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'():@&=+$,;/"};
void PercentEncodeReserved(std::string_view src, std::string& dest,
AsciiSet unreserved);
inline std::string PercentEncodeReserved(std::string_view src,
AsciiSet unreserved) {
std::string dest;
PercentEncodeReserved(src, dest, unreserved);
return dest;
}
inline std::string PercentEncodeUriPath(std::string_view src) {
return PercentEncodeReserved(src, kUriPathUnreservedChars);
}
inline std::string PercentEncodeUriComponent(std::string_view src) {
return PercentEncodeReserved(src, kUriUnreservedChars);
}
void PercentDecodeAppend(std::string_view src, std::string& dest);
inline std::string PercentDecode(std::string_view src) {
std::string dest;
PercentDecodeAppend(src, dest);
return dest;
}
struct ParsedGenericUri {
std::string_view scheme;
std::string_view authority_and_path;
std::string_view authority;
std::string_view path;
std::string_view query;
std::string_view fragment;
};
ParsedGenericUri ParseGenericUri(std::string_view uri);
}
}
#endif
#include "tensorstore/internal/uri_utils.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include "absl/strings/ascii.h"
namespace tensorstore {
namespace internal {
namespace {
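// Converts an ASCII hex digit to its numeric value; for letters, adding 9
// before masking with 0xf maps 'a'/'A' to 10, 'b'/'B' to 11, and so on.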
inline int HexDigitToInt(char c) {
assert(absl::ascii_isxdigit(c));
int x = static_cast<unsigned char>(c);
if (x > '9') {
x += 9;
}
return x & 0xf;
}
inline char IntToHexDigit(int x) {
assert(x >= 0 && x < 16);
return "0123456789ABCDEF"[x];
}
}
void PercentEncodeReserved(std::string_view src, std::string& dest,
AsciiSet unreserved) {
size_t num_escaped = 0;
for (char c : src) {
if (!unreserved.Test(c)) ++num_escaped;
}
if (num_escaped == 0) {
dest = src;
return;
}
dest.clear();
dest.reserve(src.size() + 2 * num_escaped);
for (char c : src) {
if (unreserved.Test(c)) {
dest += c;
} else {
dest += '%';
dest += IntToHexDigit(static_cast<unsigned char>(c) / 16);
dest += IntToHexDigit(static_cast<unsigned char>(c) % 16);
}
}
}
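// Decodes %XY escape sequences from `src` and appends the result to `dest`;
// malformed or incomplete escapes are copied through unchanged.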
void PercentDecodeAppend(std::string_view src, std::string& dest) {
dest.reserve(dest.size() + src.size());
for (size_t i = 0; i < src.size();) {
char c = src[i];
char x, y;
if (c != '%' || i + 2 >= src.size() ||
!absl::ascii_isxdigit((x = src[i + 1])) ||
!absl::ascii_isxdigit((y = src[i + 2]))) {
dest += c;
++i;
continue;
}
dest += static_cast<char>(HexDigitToInt(x) * 16 + HexDigitToInt(y));
i += 3;
}
}
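// Splits a URI of the form scheme://authority/path?query#fragment into its
// components without any percent decoding; absent components are left empty.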
ParsedGenericUri ParseGenericUri(std::string_view uri) {
  static constexpr std::string_view kSchemeSep("://");
ParsedGenericUri result;
const auto scheme_start = uri.find(kSchemeSep);
std::string_view uri_suffix;
if (scheme_start == std::string_view::npos) {
uri_suffix = uri;
} else {
result.scheme = uri.substr(0, scheme_start);
uri_suffix = uri.substr(scheme_start + kSchemeSep.size());
}
const auto fragment_start = uri_suffix.find('#');
const auto query_start = uri_suffix.substr(0, fragment_start).find('?');
const auto path_end = std::min(query_start, fragment_start);
result.authority_and_path = uri_suffix.substr(0, path_end);
if (const auto path_start = result.authority_and_path.find('/');
path_start == 0 || result.authority_and_path.empty()) {
result.authority = {};
result.path = result.authority_and_path;
} else if (path_start != std::string_view::npos) {
result.authority = result.authority_and_path.substr(0, path_start);
result.path = result.authority_and_path.substr(path_start);
} else {
result.authority = result.authority_and_path;
result.path = {};
}
if (query_start != std::string_view::npos) {
result.query =
uri_suffix.substr(query_start + 1, fragment_start - query_start - 1);
}
if (fragment_start != std::string_view::npos) {
result.fragment = uri_suffix.substr(fragment_start + 1);
}
return result;
}
}
} | #include "tensorstore/internal/uri_utils.h"
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::tensorstore::internal::AsciiSet;
using ::tensorstore::internal::ParseGenericUri;
using ::tensorstore::internal::PercentDecode;
using ::tensorstore::internal::PercentEncodeReserved;
using ::tensorstore::internal::PercentEncodeUriComponent;
using ::tensorstore::internal::PercentEncodeUriPath;
namespace {
TEST(PercentDecodeTest, NoOp) {
std::string_view s = "abcd %zz %%";
EXPECT_THAT(PercentDecode(s), ::testing::Eq(s));
}
TEST(PercentDecodeTest, EscapeSequenceInMiddle) {
EXPECT_THAT(PercentDecode("abc%20efg"), ::testing::Eq("abc efg"));
}
TEST(PercentDecodeTest, EscapeSequenceAtEnd) {
EXPECT_THAT(PercentDecode("abc%20"), ::testing::Eq("abc "));
}
TEST(PercentDecodeTest, EscapeSequenceLetter) {
EXPECT_THAT(PercentDecode("abc%fF"), ::testing::Eq("abc\xff"));
}
TEST(PercentEncodeReservedTest, Basic) {
constexpr AsciiSet kMyUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789/."};
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"/ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"/01234.56789";
EXPECT_THAT(PercentEncodeReserved(s, kMyUnreservedChars), ::testing::Eq(s));
std::string_view t = "-_!~*'()";
EXPECT_THAT(PercentEncodeReserved(t, kMyUnreservedChars),
::testing::Eq("%2D%5F%21%7E%2A%27%28%29"));
}
TEST(PercentEncodeUriPathTest, NoOp) {
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'():@&=+$,;/";
EXPECT_THAT(PercentEncodeUriPath(s), ::testing::Eq(s));
}
TEST(PercentEncodeUriPathTest, Percent) {
EXPECT_THAT(PercentEncodeUriPath("%"), ::testing::Eq("%25"));
}
TEST(PercentEncodeUriPathTest, NonAscii) {
EXPECT_THAT(PercentEncodeUriPath("\xff"), ::testing::Eq("%FF"));
}
TEST(PercentEncodeUriComponentTest, NoOp) {
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'()";
EXPECT_THAT(PercentEncodeUriComponent(s), ::testing::Eq(s));
}
TEST(PercentEncodeUriComponentTest, Percent) {
EXPECT_THAT(PercentEncodeUriComponent("%"), ::testing::Eq("%25"));
}
TEST(PercentEncodeUriComponentTest, NonAscii) {
EXPECT_THAT(PercentEncodeUriComponent("\xff"), ::testing::Eq("%FF"));
}
TEST(ParseGenericUriTest, PathOnly) {
auto parsed = ParseGenericUri("/abc/def");
EXPECT_EQ("", parsed.scheme);
EXPECT_EQ("/abc/def", parsed.authority_and_path);
EXPECT_EQ("", parsed.authority);
EXPECT_EQ("/abc/def", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, GsScheme) {
auto parsed = ParseGenericUri("gs:
EXPECT_EQ("gs", parsed.scheme);
EXPECT_EQ("bucket/path", parsed.authority_and_path);
EXPECT_EQ("bucket", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityNoPath) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityRootPath) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathQuery) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("query", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathFragment) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("fragment", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathQueryFragment) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("query", parsed.query);
EXPECT_EQ("fragment", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathFragmentQuery) {
auto parsed = ParseGenericUri("http:
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("fragment?query", parsed.fragment);
}
TEST(ParseGenericUriTest, S3Scheme) {
auto parsed = ParseGenericUri("s3:
EXPECT_EQ("s3", parsed.scheme);
EXPECT_EQ("bucket/path", parsed.authority_and_path);
EXPECT_EQ("bucket", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, Basic) {
static constexpr std::pair<std::string_view, std::string_view> kCases[] = {
{"http:
{"http:
{"http:
{"http:
{"http:
{"http:
{"http:
};
for (const auto& [uri, authority] : kCases) {
EXPECT_THAT(ParseGenericUri(uri).authority, ::testing::Eq(authority));
}
}
} |
654 | cpp | google/tensorstore | async_cache | tensorstore/internal/cache/async_cache.cc | tensorstore/internal/cache/async_cache_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_ASYNC_CACHE_H_
#define TENSORSTORE_INTERNAL_CACHE_ASYNC_CACHE_H_
#include <stddef.h>
#include <atomic>
#include <memory>
#include <mutex>
#include <type_traits>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/tagged_ptr.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#ifndef TENSORSTORE_ASYNC_CACHE_DEBUG
#define TENSORSTORE_ASYNC_CACHE_DEBUG 0
#endif
namespace tensorstore {
namespace internal {
class AsyncCache : public Cache {
public:
class Entry;
class TransactionNode;
struct ReadRequestState;
using ReadData = void;
struct ReadState {
std::shared_ptr<const void> data;
TimestampedStorageGeneration stamp;
static const ReadState& Unknown();
};
struct ReadRequestState {
Promise<void> issued;
Promise<void> queued;
absl::Time issued_time;
absl::Time queued_time = absl::InfinitePast();
ReadState read_state;
bool known_to_be_stale = false;
bool queued_request_is_deferred = true;
size_t read_state_size = 0;
};
template <typename ReadData>
class ReadView {
public:
ReadView() : read_state_(&ReadState::Unknown()) {}
explicit ReadView(const ReadState& read_state) : read_state_(&read_state) {}
std::shared_ptr<const ReadData> shared_data() const {
return std::static_pointer_cast<const ReadData>(read_state_->data);
}
const ReadData* data() const {
return static_cast<const ReadData*>(read_state_->data.get());
}
const ReadState& read_state() const { return *read_state_; }
const TimestampedStorageGeneration& stamp() const {
return read_state_->stamp;
}
private:
const ReadState* read_state_;
};
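// RAII handle providing read access to the current read state of an entry or
// transaction node; the owning entry's mutex is held for the lifetime of the
// lock.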
template <typename ReadData>
class ReadLock : public ReadView<ReadData> {
public:
ReadLock() = default;
template <typename DerivedEntryOrNode,
typename = std::enable_if_t<
std::is_base_of_v<Entry, DerivedEntryOrNode> ||
std::is_base_of_v<TransactionNode, DerivedEntryOrNode>>>
explicit ReadLock(DerivedEntryOrNode& entry_or_node)
: ReadView<ReadData>(entry_or_node.LockReadState()),
lock_(GetOwningEntry(entry_or_node).mutex(), std::adopt_lock) {
static_assert(std::is_convertible_v<
const typename DerivedEntryOrNode::OwningCache::ReadData*,
const ReadData*>);
}
private:
UniqueWriterLock<absl::Mutex> lock_;
};
#if 0
template <typename DerivedEntryOrNode,
typename = std::enable_if_t<
std::is_base_of_v<Entry, DerivedEntryOrNode> ||
std::is_base_of_v<TransactionNode, DerivedEntryOrNode>>>
explicit ReadLock(DerivedEntryOrNode& entry_or_node)
-> ReadLock<typename DerivedEntryOrNode::OwningCache::ReadData>;
#endif
template <typename DerivedNode>
class WriteLock {
static_assert(std::is_base_of_v<TransactionNode, DerivedNode>);
public:
explicit WriteLock(internal::OpenTransactionNodePtr<DerivedNode> node,
std::adopt_lock_t)
: node_(std::move(node)) {}
WriteLock(WriteLock&&) = default;
WriteLock(const WriteLock&) = delete;
WriteLock& operator=(WriteLock&&) = default;
WriteLock& operator=(const WriteLock&) = delete;
DerivedNode* operator->() const { return node_.get(); }
DerivedNode& operator*() const { return *node_; }
internal::OpenTransactionNodePtr<DerivedNode> unlock()
ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (node_) {
node_->WriterUnlock();
}
return std::exchange(node_, {});
}
~WriteLock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (node_) node_->WriterUnlock();
}
private:
internal::OpenTransactionNodePtr<DerivedNode> node_;
};
struct AsyncCacheReadRequest {
absl::Time staleness_bound = absl::InfiniteFuture();
Batch::View batch;
};
class ABSL_LOCKABLE Entry : public Cache::Entry {
public:
using OwningCache = AsyncCache;
Entry() = default;
template <typename DerivedEntry>
friend std::enable_if_t<std::is_base_of_v<Entry, DerivedEntry>,
DerivedEntry&>
GetOwningEntry(DerivedEntry& entry) {
return entry;
}
Future<const void> Read(AsyncCacheReadRequest request,
bool must_not_be_known_to_be_stale = true);
template <typename DerivedEntry>
friend std::enable_if_t<
std::is_base_of_v<Entry, DerivedEntry>,
Result<OpenTransactionNodePtr<
typename DerivedEntry::OwningCache::TransactionNode>>>
GetTransactionNode(DerivedEntry& entry,
internal::OpenTransactionPtr& transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(auto node,
entry.GetTransactionNodeImpl(transaction));
return internal::static_pointer_cast<
typename DerivedEntry::OwningCache::TransactionNode>(std::move(node));
}
template <typename DerivedEntry>
friend std::enable_if_t<
std::is_base_of_v<Entry, DerivedEntry>,
Result<WriteLock<typename DerivedEntry::OwningCache::TransactionNode>>>
GetWriteLockedTransactionNode(
DerivedEntry& entry, const internal::OpenTransactionPtr& transaction)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
using DerivedNode = typename DerivedEntry::OwningCache::TransactionNode;
while (true) {
auto transaction_copy = transaction;
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, entry.GetTransactionNodeImpl(transaction_copy));
if (node->try_lock()) {
return WriteLock<DerivedNode>(
internal::static_pointer_cast<DerivedNode>(std::move(node)),
std::adopt_lock);
}
}
}
virtual void DoRead(AsyncCacheReadRequest request) = 0;
virtual void ReadSuccess(ReadState&& read_state);
virtual void ReadError(absl::Status error);
virtual size_t ComputeReadDataSizeInBytes(const void* data);
ReadState& LockReadState() ABSL_NO_THREAD_SAFETY_ANALYSIS {
mutex().WriterLock();
return read_request_state_.read_state;
}
Result<OpenTransactionNodePtr<TransactionNode>> GetTransactionNodeImpl(
OpenTransactionPtr& transaction);
#ifdef TENSORSTORE_ASYNC_CACHE_DEBUG
~Entry();
#endif
ReadRequestState read_request_state_;
using TransactionTree =
internal::intrusive_red_black_tree::Tree<TransactionNode,
TransactionNode>;
TransactionTree transactions_;
TransactionNode* committing_transaction_node_{nullptr};
template <typename Sink>
friend void AbslStringify(Sink& sink, const Entry& entry) {
auto& owning_cache = GetOwningCache(const_cast<Entry&>(entry));
absl::Format(&sink, "[%s: entry=%p, key=%s] ",
typeid(owning_cache).name(), &entry,
tensorstore::QuoteString(entry.key()));
}
};
class ABSL_LOCKABLE TransactionNode
: public internal::TransactionState::Node,
public internal::intrusive_red_black_tree::NodeBase<TransactionNode> {
public:
using OwningCache = AsyncCache;
explicit TransactionNode(Entry& entry);
~TransactionNode();
void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
void WriterUnlock() ABSL_UNLOCK_FUNCTION();
void DebugAssertMutexHeld() {
#ifndef NDEBUG
mutex_.AssertHeld();
#endif
}
bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
virtual size_t ComputeWriteStateSizeInBytes();
template <typename DerivedNode>
friend std::enable_if_t<std::is_base_of_v<TransactionNode, DerivedNode>,
typename DerivedNode::OwningCache::Entry&>
GetOwningEntry(DerivedNode& node) {
return static_cast<typename DerivedNode::OwningCache::Entry&>(
*static_cast<Cache::Entry*>(node.associated_data()));
}
template <typename DerivedNode>
friend std::enable_if_t<std::is_base_of_v<TransactionNode, DerivedNode>,
typename DerivedNode::OwningCache&>
GetOwningCache(DerivedNode& node) {
return GetOwningCache(GetOwningEntry(node));
}
virtual absl::Status DoInitialize(
internal::OpenTransactionPtr& transaction);
void SetReadsCommitted() { reads_committed_ = true; }
void MarkSizeUpdated() { size_updated_ = true; }
virtual void Revoke();
bool IsRevoked() { return revoked_.load(std::memory_order_acquire); }
virtual void InvalidateReadState();
Future<const void> Read(AsyncCacheReadRequest request,
bool must_not_be_known_to_be_stale = true);
virtual void DoRead(AsyncCacheReadRequest request) = 0;
virtual void ReadSuccess(ReadState&& read_state);
virtual void ReadError(absl::Status error);
void Commit() override;
virtual void WritebackSuccess(ReadState&& read_state);
virtual void WritebackError();
using ApplyReceiver = AnyReceiver<absl::Status, ReadState>;
struct ApplyOptions {
absl::Time staleness_bound;
enum ApplyMode {
kNormal,
kSpecifyUnchanged,
kValidateOnly,
};
ApplyMode apply_mode = kNormal;
};
virtual void DoApply(ApplyOptions option, ApplyReceiver receiver);
void PrepareForCommit() override;
void Abort() override;
ReadState | #include "tensorstore/internal/cache/async_cache.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::no_transaction;
using ::tensorstore::Transaction;
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::AsyncCache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::TransactionState;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal::WeakTransactionNodePtr;
using ::tensorstore::internal_testing::TestConcurrent;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
struct RequestLog {
struct ReadRequest {
AsyncCache::Entry* entry;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
entry->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { entry->ReadError(std::move(error)); }
};
struct TransactionReadRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { node->ReadError(std::move(error)); }
};
struct WritebackRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->WritebackSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) {
node->SetError(error);
node->WritebackError();
}
};
tensorstore::internal::ConcurrentQueue<ReadRequest> reads;
tensorstore::internal::ConcurrentQueue<TransactionReadRequest>
transaction_reads;
tensorstore::internal::ConcurrentQueue<WritebackRequest> writebacks;
void HandleWritebacks() {
while (auto req = writebacks.pop_nonblock()) {
req->Success();
}
}
};
class TestCache : public tensorstore::internal::AsyncCache {
using Base = tensorstore::internal::AsyncCache;
public:
using ReadData = size_t;
class Entry : public AsyncCache::Entry {
public:
using OwningCache = TestCache;
auto CreateWriteTransaction(OpenTransactionPtr transaction = {}) {
return GetTransactionNode(*this, transaction).value();
}
Future<const void> CreateWriteTransactionFuture(
OpenTransactionPtr transaction = {}) {
return CreateWriteTransaction(std::move(transaction))
->transaction()
->future();
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->reads.push(RequestLog::ReadRequest{this});
}
size_t ComputeReadDataSizeInBytes(const void* data) override {
return *static_cast<const size_t*>(data);
}
absl::Status do_initialize_transaction_error;
bool share_implicit_transaction_nodes = true;
};
class TransactionNode : public Base::TransactionNode {
public:
using OwningCache = TestCache;
using Base::TransactionNode::TransactionNode;
absl::Status DoInitialize(OpenTransactionPtr& transaction) override {
TENSORSTORE_RETURN_IF_ERROR(
this->Base::TransactionNode::DoInitialize(transaction));
auto& entry = GetOwningEntry(*this);
++value;
SetReadsCommitted();
return entry.do_initialize_transaction_error;
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->transaction_reads.push(
RequestLog::TransactionReadRequest{this});
}
void Commit() override {
GetOwningCache(*this).log_->writebacks.push(
RequestLog::WritebackRequest{this});
Base::TransactionNode::Commit();
}
size_t ComputeWriteStateSizeInBytes() override { return size; }
int value = 0;
size_t size = 0;
};
TestCache(RequestLog* log) : log_(log) {}
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
private:
RequestLog* log_;
};
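// The tests below drive the cache by issuing reads and writebacks, then
// popping the queued requests from RequestLog and completing them with chosen
// timestamps to exercise the staleness-bound and request-coalescing behavior.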
TEST(AsyncCacheTest, ReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
absl::Time read_time1, read_time2;
{
auto init_time = absl::Now();
auto read_future = entry->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = entry->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time1 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = entry->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = entry->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, ReadFailed) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, ReadFailedAfterSuccessfulRead) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, NonTransactionalWrite) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
Future<const void> write_future;
{
auto node = entry->CreateWriteTransaction();
weak_node.reset(node.get());
write_future = node->transaction()->future();
}
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, NonTransactionalWriteback) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
auto write_time = absl::Now();
{
auto write_req = log.writebacks.pop();
write_req.Success(write_time);
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_future = entry->Read({write_time});
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
EXPECT_FALSE(read_future.ready());
auto read_req = log.reads.pop();
read_req.Success();
EXPECT_TRUE(read_future.ready());
}
}
TEST(AsyncCacheTest, WritebackRequestedWithReadIssued) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto write_future = entry->CreateWriteTransactionFuture();
write_future.Force();
ASSERT_FALSE(write_future.ready());
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_FALSE(write_future.ready());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_ASSERT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, WritebackRequestedByCache) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, TransactionalReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
absl::Time read_time1, read_time2;
auto commit_future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
{
auto init_time = absl::Now();
auto read_future = weak_node->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = weak_node->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.transaction_reads.size());
read_time1 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = weak_node->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.transaction_reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.transaction_reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = weak_node->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
write_req.Success();
ASSERT_TRUE(commit_future.ready());
TENSORSTORE_EXPECT_OK(commit_future);
}
TEST(AsyncCacheTest, TransactionalWritebackSuccess) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
auto future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(future.ready());
TENSORSTORE_EXPECT_OK(future);
}
TEST(AsyncCacheTest, TransactionalWritebackError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
weak_node.reset(entry->CreateWriteTransaction(open_transaction).get());
}
auto future = transaction.CommitAsync();
auto error = absl::UnknownError("write error");
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Error(error);
}
ASSERT_TRUE(future.ready());
EXPECT_EQ(error, future.status());
}
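// Commits several atomic transactions concurrently, each writing to the same
// set of entries, and verifies that the resulting writebacks arrive grouped by
// transaction and cover every entry exactly once per transaction.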
TEST(AsyncCacheTest, ConcurrentTransactionCommit) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
static constexpr size_t kNumEntries = 2;
tensorstore::internal::PinnedCacheEntry<TestCache> entries[kNumEntries];
for (size_t i = 0; i < kNumEntries; ++i) {
entries[i] = GetCacheEntry(cache, tensorstore::StrCat(i));
}
static constexpr size_t kNumTransactions = 3;
std::vector<Transaction> transactions(kNumTransactions, no_transaction);
TestConcurrent<kNumTransactions>(
100,
[&] {
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
transaction = Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
transaction));
for (size_t j = 0; j < kNumEntries; ++j) {
entries[(i + j) % kNumEntries]->CreateWriteTransaction(
open_transaction);
}
ASSERT_FALSE(transaction.future().ready());
}
},
[&] {
TransactionState* expected_transactions[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
ASSERT_TRUE(transaction.commit_started());
ASSERT_FALSE(transaction.future().ready());
expected_transactions[i] = TransactionState::get(transaction);
}
TransactionState* transaction_order[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
PinnedCacheEntry<TestCache> entry_order[kNumEntries];
ASSERT_EQ(kNumEntries, log.writebacks.size());
for (size_t j = 0; j < kNumEntries; ++j) {
auto write_req = log.writebacks.pop();
entry_order[j].reset(static_cast<TestCache::Entry*>(
&GetOwningEntry(*write_req.node)));
if (j == 0) {
transaction_order[i] = write_req.node->transaction();
} else {
ASSERT_EQ(transaction_order[i], write_req.node->transaction());
}
write_req.Success();
}
EXPECT_THAT(entry_order,
::testing::UnorderedElementsAreArray(entries));
}
EXPECT_THAT(transaction_order, ::testing::UnorderedElementsAreArray(
expected_transactions));
for (auto& transaction : transactions) {
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_ASSERT_OK(transaction.future());
transaction = no_transaction;
}
},
[&](size_t i) { transactions[i].CommitAsync().IgnoreFuture(); });
}
TEST(AsyncCacheTest, DoInitializeTransactionError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
entry->do_initialize_transaction_error = absl::UnknownError("initialize");
{
OpenTransactionPtr transaction;
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
}
TEST(AsyncCacheTest, ConcurrentInitializeExplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
OpenTransactionPtr open_transaction;
TestConcurrent<2>(
100,
[&] {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
},
[] {},
[&](size_t i) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, open_transaction));
EXPECT_EQ(1, node->value);
});
}
TEST(AsyncCacheTest, ConcurrentInitializeImplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
TestConcurrent<2>(
100,
[] {},
[&] { log.HandleWritebacks(); },
[&](size_t i) {
OpenTransactionPtr transaction;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, transaction));
        EXPECT_EQ(1, node->value);
      });
}
655 | cpp | google/tensorstore | cache_pool_resource | tensorstore/internal/cache/cache_pool_resource.cc | tensorstore/internal/cache/cache_pool_resource_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_CACHE_POOL_RESOURCE_H_
#define TENSORSTORE_INTERNAL_CACHE_CACHE_POOL_RESOURCE_H_
#include "tensorstore/internal/cache/cache.h"
namespace tensorstore {
namespace internal {
struct CachePoolResource {
static constexpr char id[] = "cache_pool";
using Resource = CachePool::WeakPtr;
};
}
}
#endif
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
struct CachePoolResourceTraits
: public ContextResourceTraits<CachePoolResource> {
using Spec = CachePool::Limits;
using Resource = typename CachePoolResource::Resource;
static constexpr Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(
jb::Member("total_bytes_limit",
jb::Projection(&Spec::total_bytes_limit,
jb::DefaultValue([](auto* v) { *v = 0; }))));
}
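  // Presumed JSON form accepted by the binder above: an object with an
  // optional "total_bytes_limit" member, e.g. {"total_bytes_limit": 100};
  // when omitted, the member defaults to 0.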
static Result<Resource> Create(const Spec& limits,
ContextResourceCreationContext context) {
return CachePool::WeakPtr(CachePool::Make(limits));
}
static Spec GetSpec(const Resource& pool, const ContextSpecBuilder& builder) {
return pool->limits();
}
static void AcquireStrongReference(const Resource& p) {
internal_cache::StrongPtrTraitsCachePool::increment(p.get());
}
static void ReleaseStrongReference(const Resource& p) {
internal_cache::StrongPtrTraitsCachePool::decrement(p.get());
}
};
const ContextResourceRegistration<CachePoolResourceTraits> registration;
}
}
} | #include "tensorstore/internal/cache/cache_pool_resource.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::CachePoolResource;
TEST(CachePoolResourceTest, Default) {
auto resource_spec = Context::Resource<CachePoolResource>::DefaultSpec();
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
TEST(CachePoolResourceTest, EmptyObject) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
::nlohmann::json::object_t{}));
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
TEST(CachePoolResourceTest, TotalBytesLimitOnly) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
{{"total_bytes_limit", 100}}));
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(100u, (*cache)->limits().total_bytes_limit);
}
} |
656 | cpp | google/tensorstore | kvs_backed_cache | tensorstore/internal/cache/kvs_backed_cache.cc | tensorstore/internal/cache/kvs_backed_cache_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_KVS_BACKED_CACHE_H_
#define TENSORSTORE_INTERNAL_CACHE_KVS_BACKED_CACHE_H_
#include <stddef.h>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/future_sender.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
void KvsBackedCache_IncrementReadUnchangedMetric();
void KvsBackedCache_IncrementReadChangedMetric();
void KvsBackedCache_IncrementReadErrorMetric();
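// KvsBackedCache adapts an AsyncCache-derived Parent so that each cache entry
// is backed by a single key in a kvstore::Driver: reads fetch the stored value
// and DoDecode it, while transactional writebacks DoEncode the dirty read
// state and write it back conditionally through the kvstore RMW machinery.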
template <typename Derived, typename Parent>
class KvsBackedCache : public Parent {
static_assert(std::is_base_of_v<AsyncCache, Parent>);
public:
template <typename... U>
explicit KvsBackedCache(kvstore::DriverPtr kvstore_driver, U&&... args)
: Parent(std::forward<U>(args)...) {
SetKvStoreDriver(std::move(kvstore_driver));
}
class TransactionNode;
class Entry : public Parent::Entry {
public:
using OwningCache = KvsBackedCache;
virtual std::string GetKeyValueStoreKey() {
return std::string{this->key()};
}
template <typename EntryOrNode>
struct DecodeReceiverImpl {
EntryOrNode* self_;
TimestampedStorageGeneration stamp_;
void set_error(absl::Status error) {
self_->ReadError(
GetOwningEntry(*self_).AnnotateError(error,
true));
}
void set_cancel() { set_error(absl::CancelledError("")); }
void set_value(std::shared_ptr<const void> data) {
AsyncCache::ReadState read_state;
read_state.stamp = std::move(stamp_);
read_state.data = std::move(data);
self_->ReadSuccess(std::move(read_state));
}
};
template <typename EntryOrNode>
struct ReadReceiverImpl {
EntryOrNode* entry_or_node_;
std::shared_ptr<const void> existing_read_data_;
void set_value(kvstore::ReadResult read_result) {
if (read_result.aborted()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *entry_or_node_
<< "Value has not changed, stamp=" << read_result.stamp;
KvsBackedCache_IncrementReadUnchangedMetric();
entry_or_node_->ReadSuccess(AsyncCache::ReadState{
std::move(existing_read_data_), std::move(read_result.stamp)});
return;
}
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *entry_or_node_ << "DoDecode: " << read_result.stamp;
KvsBackedCache_IncrementReadChangedMetric();
GetOwningEntry(*entry_or_node_)
.DoDecode(std::move(read_result).optional_value(),
DecodeReceiverImpl<EntryOrNode>{
entry_or_node_, std::move(read_result.stamp)});
}
void set_error(absl::Status error) {
KvsBackedCache_IncrementReadErrorMetric();
entry_or_node_->ReadError(GetOwningEntry(*entry_or_node_)
.AnnotateError(error, true));
}
void set_cancel() { ABSL_UNREACHABLE(); }
};
void DoRead(AsyncCache::AsyncCacheReadRequest request) final {
kvstore::ReadOptions kvstore_options;
kvstore_options.staleness_bound = request.staleness_bound;
auto read_state = AsyncCache::ReadLock<void>(*this).read_state();
kvstore_options.generation_conditions.if_not_equal =
std::move(read_state.stamp.generation);
kvstore_options.batch = request.batch;
auto& cache = GetOwningCache(*this);
auto future = cache.kvstore_driver_->Read(this->GetKeyValueStoreKey(),
std::move(kvstore_options));
execution::submit(
std::move(future),
ReadReceiverImpl<Entry>{this, std::move(read_state.data)});
}
using DecodeReceiver =
AnyReceiver<absl::Status,
std::shared_ptr<const typename Derived::ReadData>>;
virtual void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) = 0;
using EncodeReceiver = AnyReceiver<absl::Status, std::optional<absl::Cord>>;
virtual void DoEncode(
std::shared_ptr<const typename Derived::ReadData> read_data,
EncodeReceiver receiver) {
ABSL_UNREACHABLE();
}
absl::Status AnnotateError(const absl::Status& error, bool reading) {
return GetOwningCache(*this).kvstore_driver_->AnnotateError(
this->GetKeyValueStoreKey(), reading ? "reading" : "writing", error);
}
};
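  // The transaction node also acts as a kvstore::ReadModifyWriteSource: during
  // DoInitialize it registers itself with the kvstore driver's ReadModifyWrite,
  // so transactional reads and writebacks for the entry's key are routed
  // through the kvstore transaction (the `target_` member below).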
class TransactionNode : public Parent::TransactionNode,
public kvstore::ReadModifyWriteSource {
public:
using OwningCache = KvsBackedCache;
using Parent::TransactionNode::TransactionNode;
absl::Status DoInitialize(
internal::OpenTransactionPtr& transaction) override {
TENSORSTORE_RETURN_IF_ERROR(
Parent::TransactionNode::DoInitialize(transaction));
size_t phase;
TENSORSTORE_RETURN_IF_ERROR(
GetOwningCache(*this).kvstore_driver()->ReadModifyWrite(
transaction, phase, GetOwningEntry(*this).GetKeyValueStoreKey(),
std::ref(*this)));
this->SetPhase(phase);
if (this->target_->KvsReadsCommitted()) {
this->SetReadsCommitted();
}
return absl::OkStatus();
}
void DoRead(AsyncCache::AsyncCacheReadRequest request) final {
auto read_state = AsyncCache::ReadLock<void>(*this).read_state();
kvstore::TransactionalReadOptions kvstore_options;
kvstore_options.generation_conditions.if_not_equal =
std::move(read_state.stamp.generation);
kvstore_options.staleness_bound = request.staleness_bound;
kvstore_options.batch = request.batch;
target_->KvsRead(
std::move(kvstore_options),
typename Entry::template ReadReceiverImpl<TransactionNode>{
this, std::move(read_state.data)});
}
using ReadModifyWriteSource = kvstore::ReadModifyWriteSource;
using ReadModifyWriteTarget = kvstore::ReadModifyWriteTarget;
void KvsSetTarget(ReadModifyWriteTarget& target) override {
target_ = ⌖
}
void KvsInvalidateReadState() override {
if (this->target_->KvsReadsCommitted()) {
this->SetReadsCommitted();
}
this->InvalidateReadState();
}
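    // Writeback path: if the caller's if_not_equal generation already matches
    // the cached generation and the cached time is fresh enough, report an
    // unmodified result; if repeatable-read is required and the cached read is
    // stale, re-read and retry; otherwise apply the transaction's
    // modifications and encode the resulting data for the kvstore write.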
void KvsWriteback(
ReadModifyWriteSource::WritebackOptions options,
ReadModifyWriteSource::WritebackReceiver receiver) override {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "KvsWriteback: if_not_equal="
<< options.generation_conditions.if_not_equal
<< ", staleness_bound=" << options.staleness_bound
<< ", mode=" << options.writeback_mode;
auto read_state = AsyncCache::ReadLock<void>(*this).read_state();
if (!StorageGeneration::IsUnknown(
options.generation_conditions.if_not_equal) &&
options.generation_conditions.if_not_equal ==
read_state.stamp.generation &&
read_state.stamp.time >= options.staleness_bound) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "KvsWriteback: skipping because condition is satisfied";
return execution::set_value(receiver, kvstore::ReadResult::Unspecified(
std::move(read_state.stamp)));
}
if (!StorageGeneration::IsUnknown(require_repeatable_read_) &&
read_state.stamp.time < options.staleness_bound) {
auto read_future = this->Read({options.staleness_bound});
read_future.Force();
read_future.ExecuteWhenReady(
[this, options = std::move(options),
receiver =
std::move(receiver)](ReadyFuture<const void> future) mutable {
this->KvsWriteback(std::move(options), std::move(receiver));
});
return;
}
struct EncodeReceiverImpl {
TransactionNode* self_;
TimestampedStorageGeneration update_stamp_;
ReadModifyWriteSource::WritebackReceiver receiver_;
void set_error(absl::Status error) {
error = GetOwningEntry(*self_).AnnotateError(std::move(error),
false);
execution::set_error(receiver_, std::move(error));
}
void set_cancel() { ABSL_UNREACHABLE(); }
void set_value(std::optional<absl::Cord> value) {
kvstore::ReadResult read_result =
value ? kvstore::ReadResult::Value(std::move(*value),
std::move(update_stamp_))
: kvstore::ReadResult::Missing(std::move(update_stamp_));
execution::set_value(receiver_, std::move(read_result));
}
};
struct ApplyReceiverImpl {
TransactionNode* self_;
StorageGeneration if_not_equal_;
ReadModifyWriteSource::WritebackMode writeback_mode_;
ReadModifyWriteSource::WritebackReceiver receiver_;
void set_error(absl::Status error) {
execution::set_error(receiver_, std::move(error));
}
void set_cancel() { ABSL_UNREACHABLE(); }
void set_value(AsyncCache::ReadState update) {
if (!StorageGeneration::IsUnknown(self_->require_repeatable_read_)) {
if (!StorageGeneration::IsConditional(update.stamp.generation)) {
update.stamp.generation = StorageGeneration::Condition(
update.stamp.generation, self_->require_repeatable_read_);
auto read_stamp = AsyncCache::ReadLock<void>(*self_).stamp();
if (!StorageGeneration::IsUnknown(read_stamp.generation) &&
read_stamp.generation != self_->require_repeatable_read_) {
execution::set_error(receiver_, GetGenerationMismatchError());
return;
}
update.stamp.time = read_stamp.time;
} else if (!StorageGeneration::IsConditionalOn(
update.stamp.generation,
self_->require_repeatable_read_)) {
execution::set_error(receiver_, GetGenerationMismatchError());
return;
}
}
if (!StorageGeneration::NotEqualOrUnspecified(update.stamp.generation,
if_not_equal_)) {
return execution::set_value(
receiver_,
kvstore::ReadResult::Unspecified(std::move(update.stamp)));
}
if (!StorageGeneration::IsInnerLayerDirty(update.stamp.generation) &&
writeback_mode_ !=
ReadModifyWriteSource::kSpecifyUnchangedWriteback) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *self_ << "DoApply: if_not_equal=" << if_not_equal_
<< ", mode=" << writeback_mode_
<< ", unmodified: " << update.stamp;
if (StorageGeneration::IsUnknown(update.stamp.generation)) {
self_->new_data_ = std::nullopt;
} else {
self_->new_data_ = std::move(update.data);
}
return execution::set_value(
receiver_,
kvstore::ReadResult::Unspecified(std::move(update.stamp)));
}
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *self_ << "DoApply: if_not_equal=" << if_not_equal_
<< ", mode=" << writeback_mode_ << ", encoding: " << update.stamp
<< ", commit_started=" << self_->transaction()->commit_started();
self_->new_data_ = update.data;
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *self_ << "DoEncode";
auto update_data =
std::static_pointer_cast<const typename Derived::ReadData>(
std::move(update.data));
GetOwningEntry(*self_).DoEncode(
std::move(update_data),
EncodeReceiverImpl{self_, std::move(update.stamp),
std::move(receiver_)});
}
};
AsyncCache::TransactionNode::ApplyOptions apply_options;
apply_options.staleness_bound = options.staleness_bound;
switch (options.writeback_mode) {
case ReadModifyWriteSource::kValidateOnly:
apply_options.apply_mode =
AsyncCache::TransactionNode::ApplyOptions::kValidateOnly;
break;
case ReadModifyWriteSource::kSpecifyUnchangedWriteback:
apply_options.apply_mode =
AsyncCache::TransactionNode::ApplyOptions::kSpecifyUnchanged;
break;
case ReadModifyWriteSource::kNormalWriteback:
apply_options.apply_mode =
AsyncCache::TransactionNode::ApplyOptions::kNormal;
break;
}
this->DoApply(
std::move(apply_options),
ApplyReceiverImpl{
this, std::move(options.generation_conditions.if_not_equal),
options.writeback_mode, std::move(receiver)});
}
void KvsWritebackSuccess(TimestampedStorageGeneration new_stamp) override {
if (new_data_) {
this->WritebackSuccess(
AsyncCache::ReadState{std::move(*new_data_), std::move(new_stamp)});
} else {
this->WritebackSuccess(AsyncCache::ReadState{});
}
}
void KvsWritebackError() override { this->WritebackError(); }
void KvsRevoke() override { this->Revoke(); }
virtual absl::Status RequireRepeatableRead(
const StorageGeneration& generation) {
this->DebugAssertMutexHeld();
if (!StorageGeneration::IsUnknown(require_repeatable_read_)) {
if (require_repeatable_read_ != generation) {
return GetOwningEntry(*this).AnnotateError(
GetGenerationMismatchError(),
true);
}
} else {
require_repeatable_read_ = generation;
}
return absl::OkStatus();
}
static absl::Status GetGenerationMismatchError() {
return absl::AbortedError("Generation mismatch");
}
private:
friend class KvsBackedCache;
ReadModifyWriteTarget* target_;
std::optional<std::shared_ptr<const void>> new_data_;
StorageGeneration require_repeatable_read_;
};
kvstore::Driver* kvstore_driver() { return kvstore_driver_.get(); }
void SetKvStoreDriver(kvstore::DriverPtr driver) {
if (driver) {
this->SetBatchNestingDepth(driver->BatchNestingDepth() + 1);
}
kvstore_driver_ = std::move(driver);
}
kvstore::DriverPtr kvstore_driver_;
};
}
}
#endif
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/metrics/counter.h"
namespace tensorstore {
namespace internal {
namespace {
auto& kvs_cache_read = internal_metrics::Counter<int64_t, std::string>::New(
"/tensorstore/cache/kvs_cache_read", "category",
"Count of kvs_backed_cache reads by category. A large number of "
"'unchanged' reads indicates that the dataset is relatively quiescent.");
}
void KvsBackedCache_IncrementReadUnchangedMetric() {
static auto& cell = kvs_cache_read.GetCell("unchanged");
cell.Increment();
}
void KvsBackedCache_IncrementReadChangedMetric() {
static auto& cell = kvs_cache_read.GetCell("changed");
cell.Increment();
}
void KvsBackedCache_IncrementReadErrorMetric() {
static auto& cell = kvs_cache_read.GetCell("error");
cell.Increment();
}
}
} | #include <stddef.h>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::KeyRange;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::OpenTransactionPtr;
TENSORSTORE_GLOBAL_INITIALIZER {
using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
{
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = "MemoryNonAtomic";
options.get_store = [] {
return tensorstore::GetMemoryKeyValueStore(false);
};
options.multi_key_atomic_supported = false;
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
{
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = "MemoryAtomic";
options.get_store = [] {
return tensorstore::GetMemoryKeyValueStore(true);
};
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
}
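// MockStoreTest backs a KvsBackedTestCache with a MockKeyValueStore so each
// test can intercept the issued read/write/delete_range requests and either
// forward them to an in-memory store or fail them with an explicit error.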
class MockStoreTest : public ::testing::Test {
protected:
CachePool::StrongPtr pool = CachePool::Make(CachePool::Limits{});
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr memory_store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::CachePtr<KvsBackedTestCache> GetCache(
std::string cache_identifier = {},
kvstore::DriverPtr kvstore_driver = {}) {
if (!kvstore_driver) kvstore_driver = mock_store;
return tensorstore::internal::GetCache<KvsBackedTestCache>(
pool.get(), cache_identifier,
[&] { return std::make_unique<KvsBackedTestCache>(kvstore_driver); });
}
tensorstore::internal::CachePtr<KvsBackedTestCache> cache = GetCache();
};
TEST_F(MockStoreTest, ReadSuccess) {
auto entry = GetCacheEntry(cache, "a");
auto read_time = absl::Now();
auto read_future = entry->Read({read_time});
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
EXPECT_EQ(tensorstore::OptionalByteRangeRequest{},
read_req.options.byte_range);
EXPECT_EQ(read_time, read_req.options.staleness_bound);
read_req(memory_store);
}
TEST_F(MockStoreTest, ReadError) {
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::Now()});
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, WriteError) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto write_req = mock_store->write_requests.pop();
write_req.promise.SetResult(absl::FailedPreconditionError("write error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error writing \"a\": write error"));
}
TEST_F(MockStoreTest, ReadErrorDuringWriteback) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, ReadErrorDueToValidateDuringWriteback) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Validate(
open_transaction, [](absl::Cord data) { return absl::OkStatus(); }));
auto read_future = entry->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(), ::testing::Optional(absl::Cord()));
}
transaction.CommitAsync().IgnoreFuture();
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, WriteDuringRead) {
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::InfinitePast()});
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto read_future2 = entry->Read({absl::InfinitePast()});
{
auto read_req = mock_store->read_requests.pop();
read_req(memory_store);
TENSORSTORE_ASSERT_OK(read_future);
TENSORSTORE_ASSERT_OK(read_future2);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
TENSORSTORE_ASSERT_OK(transaction.future());
}
}
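// The MultiPhase* tests use transaction barriers to split one transaction into
// sequential phases and verify that each phase issues its own appropriately
// generation-conditioned kvstore reads and writes.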
TEST_F(MockStoreTest, MultiPhaseSeparateKeys) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "a")
->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "b")
->Modify(open_transaction, false, "de"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("y"), "b")
->Modify(open_transaction, false, "f"));
}
transaction.CommitAsync().IgnoreFuture();
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
EXPECT_THAT(memory_store->Read("a").result(),
MatchesKvsReadResult(absl::Cord("abc")));
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("def", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(memory_store->Read("b").result(),
MatchesKvsReadResult(absl::Cord("def")));
}
TEST_F(MockStoreTest, MultiPhaseSameKey) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def"));
}
transaction.CommitAsync().IgnoreFuture();
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto read_result,
memory_store->Read("a").result());
EXPECT_THAT(read_result, MatchesKvsReadResult(absl::Cord("abc")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto write_stamp, memory_store->Write("a", absl::Cord("xyz")).result());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(read_result.stamp.generation,
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abcdef", write_req.value);
write_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(read_result.stamp.generation,
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(write_stamp.generation,
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("xyzdef", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(memory_store->Read("a").result(),
MatchesKvsReadResult(absl::Cord("xyzdef")));
}
TEST_F(MockStoreTest, MultiPhaseSameKeyAbort) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def"));
}
transaction.Abort();
}
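// The DeleteRange* tests verify how transactional DeleteRange requests are
// coalesced when overlapping, split around keys written in the same
// transaction, and how a later DeleteRange supersedes an earlier write it
// covers.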
TEST_F(MockStoreTest, DeleteRangeSingle) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeError) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req.promise.SetResult(absl::FailedPreconditionError("delete range error"));
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"delete range error"));
}
TEST_F(MockStoreTest, DeleteRangeAtomicError) {
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
EXPECT_THAT(mock_store->TransactionalDeleteRange(open_transaction,
KeyRange{"a", "c"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot delete range starting at \"a\" as single "
"atomic transaction"));
}
}
TEST_F(MockStoreTest, DeleteRangeMultipleDisjoint) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"d", "f"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("d", "f"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeMultipleOverlapping) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"b", "f"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "f"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeBeforeWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "b"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange(KeyRange::Successor("b"), "c"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_THAT(write_req.value, ::testing::Optional(std::string("abc")));
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeBeforeWriteJustBeforeExclusiveMax) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", KeyRange::Successor("b")}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "b"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeAfterWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeAfterValidateError) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")
->Validate(open_transaction, [](absl::Cord value) {
return absl::FailedPreconditionError("validate error");
}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
mock_store->read_requests.pop()(memory_store);
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_TRUE(mock_store->delete_range_requests.empty());
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error writing \"b\": validate error"));
}
TEST_F(MockStoreTest, DeleteRangeAfterValidateAndModify) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")
->Validate(open_transaction, [](const absl::Cord& input) {
return absl::OkStatus();
}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ("b", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, MultiPhaseValidateError) {
auto transaction = Transaction(tensorstore::isolated);
auto entry = GetCacheEntry(cache, "a");
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
open_transaction->Barrier();
auto validator = [](absl::Cord value) {
if (value != "abc") {
return absl::AbortedError("validation");
}
return absl::OkStatus();
};
TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def")));
ASSERT_FALSE(transaction.future().ready());
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(tensorstore::OptionalByteRangeRequest(0, 0),
read_req.options.byte_range);
read_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
read_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kAborted));
}
TEST_F(MockStoreTest, MultiPhaseValidateErrorAfterReadValue) {
auto transaction = Transaction(tensorstore::isolated);
auto entry = GetCacheEntry(cache, "a");
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
open_transaction->Barrier();
auto validator = [](absl::Cord value) {
if (value != "abc") {
return absl::AbortedError("validation: " + std::string(value));
}
return absl::OkStatus();
};
TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "xyz"));
TENSORSTORE_ASSERT_OK(entry->Validate(
open_transaction, [](absl::Cord value) { return absl::OkStatus(); }));
EXPECT_THAT(entry->ReadValue(open_transaction).result(),
::testing::Optional(absl::Cord("xyz")));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def")));
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
write_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
read_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kAborted));
}
TEST_F(MockStoreTest, UnboundedDeleteRangeAfterWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", ""}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", ""), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeThenWriteThenDeleteRange) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "d"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "d"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, MultiPhaseDeleteRangeOverlapEnd) {
const std::vector<std::vector<KeyRange>> test_cases = {
{
KeyRange{"a", "c"},
KeyRange{"a", "c"},
},
{
KeyRange{"a", "c"},
KeyRange{"a", "d"},
},
{
KeyRange{"b", "c"},
KeyRange{"a", "c"},
},
{
KeyRange{"b", "c"},
KeyRange{"a", "d"},
},
{
KeyRange{"a", "d"},
KeyRange{"b", "c"},
},
};
for (const auto& test_case : test_cases) {
SCOPED_TRACE("test_case=" + ::testing::PrintToString(test_case));
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
for (const auto& range : test_case) {
TENSORSTORE_ASSERT_OK(
mock_store->TransactionalDeleteRange(open_transaction, range));
open_transaction->Barrier();
}
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
for (const auto& range : test_case) {
auto req = mock_store->delete_range_requests.pop();
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ(range, req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
}
TEST_F(MockStoreTest, MultiPhaseDeleteRangeAndWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "d"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "d"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, MultipleKeyValueStoreAtomicError) {
auto transaction = Transaction(tensorstore::atomic_isolated);
auto mock_store2 = MockKeyValueStore::Make();
{
TENSORSTORE_A |
657 | cpp | google/tensorstore | cache | tensorstore/internal/cache/cache.cc | tensorstore/internal/cache/cache_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_CACHE_H_
#define TENSORSTORE_INTERNAL_CACHE_CACHE_H_
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <iosfwd>
#include <memory>
#include <optional>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/functional/function_ref.h"
#include "tensorstore/internal/cache/cache_impl.h"
#include "tensorstore/internal/cache/cache_pool_limits.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/poly/poly.h"
namespace tensorstore {
namespace internal {
class Cache;
template <typename CacheType>
using CachePtr = internal_cache::CachePtr<CacheType>;
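// A CachePool groups caches that share a byte budget (`Limits`) and a common
// LRU eviction queue.  Caches are obtained from a pool via `GetCache`;
// `StrongPtr` and `WeakPtr` manage the pool's lifetime.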
class CachePool : private internal_cache::CachePoolImpl {
public:
using Limits = CachePoolLimits;
const Limits& limits() const { return limits_; }
class WeakPtr;
class StrongPtr : private internal_cache::CachePoolStrongPtr {
using Base = internal_cache::CachePoolStrongPtr;
public:
StrongPtr() = default;
explicit StrongPtr(const WeakPtr& ptr);
using Base::operator bool;
using Base::operator->;
using Base::operator*;
using Base::get;
private:
friend class internal_cache::Access;
};
class WeakPtr : private internal_cache::CachePoolWeakPtr {
using Base = internal_cache::CachePoolWeakPtr;
public:
WeakPtr() = default;
explicit WeakPtr(const StrongPtr& ptr) : Base(ptr.get()) {}
explicit WeakPtr(CachePool* ptr) : Base(ptr) {}
using Base::operator bool;
using Base::operator->;
using Base::operator*;
using Base::get;
private:
friend class internal_cache::Access;
};
static StrongPtr Make(const Limits& limits);
private:
using internal_cache::CachePoolImpl::CachePoolImpl;
friend class internal_cache::Access;
};
template <typename CacheType, typename MakeCache>
CachePtr<CacheType> GetCacheWithExplicitTypeInfo(
CachePool* pool, const std::type_info& type_info,
std::string_view cache_key, MakeCache&& make_cache) {
auto cache = internal_cache::GetCacheInternal(
internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(pool),
type_info, cache_key, [&]() -> std::unique_ptr<internal::Cache> {
std::unique_ptr<CacheType> cache = make_cache();
if (!cache) return nullptr;
void* user_ptr = cache.get();
auto base_ptr = std::unique_ptr<internal::Cache>(
&internal_cache::GetCacheObject(cache.release()));
internal_cache::Access::StaticCast<internal_cache::CacheImpl>(
base_ptr.get())
->user_ptr_ = user_ptr;
return base_ptr;
});
if (!cache) return nullptr;
return CachePtr<CacheType>(
static_cast<CacheType*>(
internal_cache::Access::StaticCast<internal_cache::CacheImpl>(
cache.release())
->user_ptr_),
internal::adopt_object_ref);
}
template <typename CacheType, typename MakeCache>
CachePtr<CacheType> GetCache(CachePool* pool, std::string_view cache_key,
MakeCache&& make_cache) {
return GetCacheWithExplicitTypeInfo<CacheType>(
pool, typeid(CacheType), cache_key, std::forward<MakeCache>(make_cache));
}
using WeakPinnedCacheEntry = internal_cache::WeakPinnedCacheEntry;
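// Base class for entries stored in a `Cache`.  Entries are keyed by string
// and pinned via `PinnedCacheEntry`; the underlying reference count advances
// in increments of 2 per strong reference (hence `use_count()` divides by 2),
// with the low bit apparently reserved for weak-reference bookkeeping.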
class ABSL_LOCKABLE CacheEntry : private internal_cache::CacheEntryImpl {
public:
using OwningCache = internal::Cache;
const std::string_view key() const { return key_; }
uint32_t use_count() const {
return reference_count_.load(std::memory_order_acquire) / 2;
}
absl::Mutex& mutex() { return mutex_; }
void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
void WriterUnlock() ABSL_UNLOCK_FUNCTION();
void DebugAssertMutexHeld() {
#ifndef NDEBUG
mutex_.AssertHeld();
#endif
}
void NotifySizeChanged() {
this->DebugAssertMutexHeld();
flags_ |= kSizeChanged;
}
virtual void DoInitialize();
WeakPinnedCacheEntry AcquireWeakReference() {
return internal_cache::AcquireWeakCacheEntryReference(this);
}
virtual ~CacheEntry();
private:
friend class internal_cache::Access;
};
template <typename CacheType>
using PinnedCacheEntry =
internal_cache::CacheEntryStrongPtr<typename CacheType::Entry>;
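// Base class for caches.  Subclasses must implement `DoAllocateEntry` and
// `DoGetSizeofEntry`, and may override `DoGetSizeInBytes` so the pool can
// account for per-entry memory usage.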
class Cache : private internal_cache::CacheImpl {
public:
using Entry = CacheEntry;
using PinnedEntry = PinnedCacheEntry<Cache>;
Cache();
virtual ~Cache();
CachePool* pool() const {
return internal_cache::Access::StaticCast<CachePool>(pool_);
}
size_t use_count() const {
return reference_count_.load() /
internal_cache::CacheImpl::kStrongReferenceIncrement;
}
std::string_view cache_identifier() const { return cache_identifier_; }
virtual Entry* DoAllocateEntry() = 0;
virtual size_t DoGetSizeInBytes(Entry* entry);
virtual size_t DoGetSizeofEntry() = 0;
private:
friend class internal_cache::Access;
};
template <typename Entry>
inline std::enable_if_t<std::is_base_of_v<Cache::Entry, Entry>,
typename Entry::OwningCache&>
GetOwningCache(Entry& entry) {
return *internal_cache::Access::StaticCast<typename Entry::OwningCache>(
internal_cache::Access::StaticCast<internal_cache::CacheEntryImpl>(&entry)
->cache_);
}
template <typename CacheType>
std::enable_if_t<std::is_base_of<Cache, CacheType>::value,
PinnedCacheEntry<CacheType>>
GetCacheEntry(CacheType* cache, std::string_view key) {
return static_pointer_cast<typename CacheType::Entry>(
internal_cache::GetCacheEntryInternal(cache, key));
}
template <typename CacheType>
std::enable_if_t<std::is_base_of<Cache, CacheType>::value,
PinnedCacheEntry<CacheType>>
GetCacheEntry(const CachePtr<CacheType>& cache, std::string_view key) {
return GetCacheEntry(cache.get(), key);
}
}
}
#endif
#include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <atomic>
#include <bitset>
#include <cassert>
#include <memory>
#include <mutex>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/cache/cache_pool_limits.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
namespace internal_cache {
auto& hit_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/hit_count", "Number of cache hits.");
auto& miss_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/miss_count", "Number of cache misses.");
auto& evict_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/evict_count", "Number of evictions from the cache.");
using ::tensorstore::internal::PinnedCacheEntry;
#if !defined(NDEBUG)
inline void DebugAssertMutexHeld(absl::Mutex* mutex) { mutex->AssertHeld(); }
#else
inline void DebugAssertMutexHeld(absl::Mutex* mutex) {}
#endif
using LruListAccessor =
internal::intrusive_linked_list::MemberAccessor<LruListNode>;
CachePoolImpl::CachePoolImpl(const CachePool::Limits& limits)
: limits_(limits),
total_bytes_(0),
strong_references_(1),
weak_references_(1) {
Initialize(LruListAccessor{}, &eviction_queue_);
}
namespace {
inline void AcquireWeakReference(CachePoolImpl* p) {
[[maybe_unused]] auto old_count =
p->weak_references_.fetch_add(1, std::memory_order_relaxed);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:increment", p,
old_count + 1);
}
void ReleaseWeakReference(CachePoolImpl* p) {
auto new_count = --p->weak_references_;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:decrement", p,
new_count);
if (new_count == 0) {
delete Access::StaticCast<CachePool>(p);
}
}
struct DecrementCacheReferenceCount {
explicit DecrementCacheReferenceCount(CacheImpl* cache_impl, size_t amount) {
old_count = cache_impl->reference_count_.fetch_sub(
amount, std::memory_order_acq_rel);
new_count = old_count - amount;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("Cache:decrement", cache_impl,
new_count);
}
bool should_delete() const {
return !CacheImpl::ShouldDelete(old_count) &&
CacheImpl::ShouldDelete(new_count);
}
bool should_release_cache_pool_weak_reference() const {
assert(old_count - new_count == CacheImpl::kStrongReferenceIncrement);
return !CacheImpl::ShouldHoldPoolWeakReference(new_count);
}
size_t old_count, new_count;
};
void UnlinkListNode(LruListNode* node) noexcept {
Remove(LruListAccessor{}, node);
Initialize(LruListAccessor{}, node);
}
void UnregisterEntryFromPool(CacheEntryImpl* entry,
CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
UnlinkListNode(entry);
pool->total_bytes_.fetch_sub(entry->num_bytes_, std::memory_order_relaxed);
}
void AddToEvictionQueue(CachePoolImpl* pool, CacheEntryImpl* entry) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
auto* eviction_queue = &pool->eviction_queue_;
if (!OnlyContainsNode(LruListAccessor{}, entry)) {
Remove(LruListAccessor{}, entry);
}
InsertBefore(LruListAccessor{}, eviction_queue, entry);
}
void DestroyCache(CachePoolImpl* pool, CacheImpl* cache);
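// Evicts least-recently-used entries until the pool's total byte count drops
// back under `limits_.total_bytes_limit`.  Entries are unlinked while holding
// `lru_mutex_`, but the batched destruction of evicted entries happens with
// the mutex temporarily released.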
void MaybeEvictEntries(CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
constexpr size_t kBufferSize = 64;
std::array<CacheEntryImpl*, kBufferSize> entries_to_delete;
std::bitset<kBufferSize> should_delete_cache_for_entry;
size_t num_entries_to_delete = 0;
const auto destroy_entries = [&] {
internal::ScopedWriterUnlock unlock(pool->lru_mutex_);
for (size_t i = 0; i < num_entries_to_delete; ++i) {
auto* entry = entries_to_delete[i];
if (should_delete_cache_for_entry[i]) {
DestroyCache(entry->cache_->pool_, entry->cache_);
}
entry->cache_ = nullptr;
delete Access::StaticCast<CacheEntry>(entry);
}
};
while (pool->total_bytes_.load(std::memory_order_acquire) >
pool->limits_.total_bytes_limit) {
auto* queue = &pool->eviction_queue_;
if (queue->next == queue) {
break;
}
auto* entry = static_cast<CacheEntryImpl*>(queue->next);
auto* cache = entry->cache_;
bool evict = false;
bool should_delete_cache = false;
auto& shard = cache->ShardForKey(entry->key_);
if (absl::MutexLock lock(&shard.mutex);
entry->reference_count_.load(std::memory_order_acquire) == 0) {
[[maybe_unused]] size_t erase_count = shard.entries.erase(entry);
assert(erase_count == 1);
if (shard.entries.empty()) {
if (DecrementCacheReferenceCount(cache,
CacheImpl::kNonEmptyShardIncrement)
.should_delete()) {
should_delete_cache = true;
}
}
evict = true;
}
if (!evict) {
UnlinkListNode(entry);
continue;
}
UnregisterEntryFromPool(entry, pool);
evict_count.Increment();
should_delete_cache_for_entry[num_entries_to_delete] = should_delete_cache;
entries_to_delete[num_entries_to_delete++] = entry;
if (num_entries_to_delete == entries_to_delete.size()) {
destroy_entries();
num_entries_to_delete = 0;
}
}
destroy_entries();
}
void InitializeNewEntry(CacheEntryImpl* entry, CacheImpl* cache) noexcept {
entry->cache_ = cache;
entry->reference_count_.store(2, std::memory_order_relaxed);
entry->num_bytes_ = 0;
Initialize(LruListAccessor{}, entry);
}
void DestroyCache(CachePoolImpl* pool,
CacheImpl* cache) ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (pool) {
if (!cache->cache_identifier_.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(cache);
if (it != pool->caches_.end() && *it == cache) {
pool->caches_.erase(it);
}
}
if (HasLruCache(pool)) {
absl::MutexLock lru_lock(&pool->lru_mutex_);
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
UnregisterEntryFromPool(entry, pool);
}
}
} else {
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
}
}
}
for (auto& shard : cache->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
assert(entry->reference_count_.load() >= 2 &&
entry->reference_count_.load() <= 3);
delete Access::StaticCast<Cache::Entry>(entry);
}
}
}
delete Access::StaticCast<Cache>(cache);
}
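// Decrements `reference_count` by `decrease_amount`.  The mutex returned by
// `mutex_fn` is acquired only when the count may drop to `lock_threshold` or
// below; in that case the still-held lock is returned so the caller can
// perform cleanup atomically with the final decrement.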
template <typename T, typename LockFn>
inline UniqueWriterLock<absl::Mutex> DecrementReferenceCountWithLock(
std::atomic<T>& reference_count, LockFn mutex_fn, T& new_count,
internal::type_identity_t<T> decrease_amount,
internal::type_identity_t<T> lock_threshold) {
static_assert(std::is_invocable_v<LockFn>);
static_assert(std::is_same_v<absl::Mutex&, std::invoke_result_t<LockFn>>);
{
auto count = reference_count.load(std::memory_order_relaxed);
while (true) {
if (count <= lock_threshold + decrease_amount) break;
if (reference_count.compare_exchange_weak(count, count - decrease_amount,
std::memory_order_acq_rel)) {
new_count = count - decrease_amount;
return {};
}
}
}
UniqueWriterLock lock(mutex_fn());
auto count =
reference_count.fetch_sub(decrease_amount, std::memory_order_acq_rel) -
decrease_amount;
new_count = count;
if (count > lock_threshold) {
return {};
}
return lock;
}
}
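// Releases one strong reference to a cache entry.  When the last strong
// reference is dropped, the entry is either destroyed immediately (no pool,
// or a pool with a zero byte limit) or placed on the pool's eviction queue.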
void StrongPtrTraitsCacheEntry::decrement(CacheEntry* p) noexcept
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* entry = Access::StaticCast<CacheEntryImpl>(p);
auto* cache = entry->cache_;
uint32_t new_count;
if (auto* pool_impl = cache->pool_) {
if (pool_impl->limits_.total_bytes_limit == 0) {
CacheImpl::Shard* shard = nullptr;
auto lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[&]() -> absl::Mutex& {
shard = &cache->ShardForKey(entry->key_);
return shard->mutex;
},
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", p,
new_count);
if (!lock) return;
if (new_count == 0) {
shard->entries.erase(entry);
if (shard->entries.empty()) {
cache->reference_count_.fetch_sub(CacheImpl::kNonEmptyShardIncrement,
std::memory_order_relaxed);
}
delete p;
}
} else {
auto lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[pool_impl]() -> absl::Mutex& { return pool_impl->lru_mutex_; },
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", p,
new_count);
if (!lock) return;
if (new_count == 0) {
AddToEvictionQueue(pool_impl, entry);
MaybeEvictEntries(pool_impl);
}
}
assert(new_count <= 1);
} else {
new_count =
entry->reference_count_.fetch_sub(2, std::memory_order_acq_rel) - 2;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", p,
new_count);
if (new_count > 1) return;
delete p;
}
StrongPtrTraitsCache::decrement(Access::StaticCast<Cache>(cache));
}
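// Attempts to acquire a strong reference to `cache_impl`, failing if the
// cache has already been marked for deletion.  When the cache previously had
// no strong references, a weak reference to the pool is acquired on its
// behalf.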
inline bool TryToAcquireCacheStrongReference(CachePoolImpl* pool,
CacheImpl* cache_impl) {
auto old_count = cache_impl->reference_count_.load(std::memory_order_relaxed);
while (true) {
if (CacheImpl::ShouldDelete(old_count)) {
return false;
}
if (cache_impl->reference_count_.compare_exchange_weak(
old_count, old_count + CacheImpl::kStrongReferenceIncrement,
std::memory_order_acq_rel)) {
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT(
"Cache:increment", cache_impl,
old_count + CacheImpl::kStrongReferenceIncrement);
if (!CacheImpl::ShouldHoldPoolWeakReference(old_count)) {
AcquireWeakReference(pool);
}
return true;
}
}
}
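// Returns the cache registered in `pool` under (cache_type, cache_key),
// constructing it with `make_cache` if absent.  Caches with an empty key or a
// null pool are never shared and are always freshly constructed.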
CachePtr<Cache> GetCacheInternal(
CachePoolImpl* pool, const std::type_info& cache_type,
std::string_view cache_key,
absl::FunctionRef<std::unique_ptr<Cache>()> make_cache) {
CachePoolImpl::CacheKey key(cache_type, cache_key);
if (pool && !cache_key.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(key);
if (it != pool->caches_.end()) {
auto* cache = *it;
if (!TryToAcquireCacheStrongReference(pool, cache)) {
pool->caches_.erase(it);
} else {
return CachePtr<Cache>(Access::StaticCast<Cache>(cache),
internal::adopt_object_ref);
}
}
}
std::unique_ptr<Cache> new_cache = make_cache();
if (!new_cache) return CachePtr<Cache>();
auto* cache_impl = Access::StaticCast<CacheImpl>(new_cache.get());
cache_impl->pool_ = pool;
if (!pool || cache_key.empty()) {
if (pool) {
AcquireWeakReference( | #include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <atomic>
#include <deque>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/testing/concurrent.h"
namespace {
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::Cache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::CachePtr;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::WeakPinnedCacheEntry;
using ::tensorstore::internal_cache::Access;
using ::tensorstore::internal_cache::CacheEntryImpl;
using ::tensorstore::internal_cache::CacheImpl;
using ::tensorstore::internal_cache::CachePoolImpl;
using ::tensorstore::internal_cache::LruListNode;
using ::tensorstore::internal_testing::TestConcurrent;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
CachePoolImpl* GetPoolImpl(const CachePool::StrongPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
CachePoolImpl* GetPoolImpl(const CachePool::WeakPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
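// Cache subclass used throughout these tests.  It records entry/cache
// allocation and destruction events in a shared RequestLog so tests can
// assert on when entries and caches are created and destroyed.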
class TestCache : public Cache {
public:
struct RequestLog {
absl::Mutex mutex;
std::deque<std::string> entry_allocate_log;
std::deque<std::pair<std::string, std::string>> entry_destroy_log;
std::deque<std::string> cache_allocate_log;
std::deque<std::string> cache_destroy_log;
};
class Entry : public Cache::Entry {
public:
using OwningCache = TestCache;
std::string data;
size_t size = 1;
void ChangeSize(size_t new_size) {
UniqueWriterLock<Cache::Entry> lock(*this);
size = new_size;
NotifySizeChanged();
}
~Entry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_destroy_log.emplace_back(cache_identifier_,
std::string(this->key()));
}
}
WeakPinnedCacheEntry weak_ref;
std::shared_ptr<RequestLog> log_;
std::string cache_identifier_;
};
explicit TestCache(std::shared_ptr<RequestLog> log = {}) : log_(log) {}
~TestCache() {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->cache_destroy_log.emplace_back(cache_identifier());
}
}
size_t DoGetSizeofEntry() override { return sizeof(Entry); }
Entry* DoAllocateEntry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_allocate_log.emplace_back(cache_identifier());
}
auto* entry = new Entry;
entry->cache_identifier_ = cache_identifier();
entry->log_ = log_;
return entry;
}
void OnDelete(Entry* entry) {}
size_t DoGetSizeInBytes(Cache::Entry* base_entry) override {
auto* entry = static_cast<Entry*>(base_entry);
return entry->size;
}
std::shared_ptr<RequestLog> log_;
};
class TestCacheWithCachePool : public TestCache {
public:
using TestCache::TestCache;
CachePool::WeakPtr cache_pool;
};
using EntryIdentifier = std::pair<std::string, void*>;
std::pair<std::string, void*> GetEntryIdentifier(CacheEntryImpl* entry) {
return {entry->key_, entry};
}
absl::flat_hash_set<EntryIdentifier> GetEntrySet(LruListNode* head) {
absl::flat_hash_set<EntryIdentifier> entries;
for (LruListNode* node = head->next; node != head; node = node->next) {
entries.emplace(
GetEntryIdentifier(Access::StaticCast<CacheEntryImpl>(node)));
}
return entries;
}
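// Verifies consistency between the pool's bookkeeping (registered caches,
// total byte count, eviction queue membership, weak reference count) and the
// caches listed in `expected_caches`.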
void AssertInvariants(const CachePool::StrongPtr& pool,
absl::flat_hash_set<Cache*> expected_caches)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* pool_impl = GetPoolImpl(pool);
auto eviction_queue_entries = GetEntrySet(&pool_impl->eviction_queue_);
absl::flat_hash_set<EntryIdentifier> expected_eviction_queue_entries;
size_t expected_total_bytes = 0;
for (auto* cache : pool_impl->caches_) {
EXPECT_EQ(pool_impl, cache->pool_);
EXPECT_NE("", cache->cache_identifier_);
EXPECT_EQ(1, expected_caches.count(Access::StaticCast<Cache>(cache)));
}
EXPECT_EQ(1 + expected_caches.size(), pool_impl->weak_references_.load());
for (auto* cache : expected_caches) {
auto* cache_impl = Access::StaticCast<CacheImpl>(cache);
if (!cache_impl->cache_identifier_.empty()) {
auto it = pool_impl->caches_.find(cache_impl);
ASSERT_NE(it, pool_impl->caches_.end());
EXPECT_EQ(cache_impl, *it);
}
if (pool_impl->limits_.total_bytes_limit != 0) {
for (auto& shard : cache_impl->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
EXPECT_EQ(
entry->num_bytes_,
cache->DoGetSizeInBytes(Access::StaticCast<Cache::Entry>(entry)));
expected_total_bytes += entry->num_bytes_;
if (entry->reference_count_.load() == 0) {
expected_eviction_queue_entries.emplace(GetEntryIdentifier(entry));
}
}
}
}
}
EXPECT_EQ(expected_total_bytes, pool_impl->total_bytes_);
EXPECT_THAT(expected_eviction_queue_entries,
::testing::IsSubsetOf(eviction_queue_entries));
}
#define TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(...) \
do { \
SCOPED_TRACE(""); \
AssertInvariants(__VA_ARGS__); \
} while (false)
template <typename CacheType = TestCache>
CachePtr<CacheType> GetTestCache(
CachePool* pool, std::string cache_identifier,
std::shared_ptr<TestCache::RequestLog> log = {}) {
return GetCache<CacheType>(pool, cache_identifier, [&] {
if (log) {
absl::MutexLock lock(&log->mutex);
log->cache_allocate_log.emplace_back(cache_identifier);
}
return std::make_unique<CacheType>(log);
});
}
TEST(CachePoolTest, GetCacheEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNonEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
}
TEST(CachePoolTest, GetCacheNonEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x", "x"));
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNullptr) {
auto pool = CachePool::Make(CachePool::Limits{10000});
int make_cache_calls = 0;
auto make_cache = [&] {
++make_cache_calls;
return nullptr;
};
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(1, make_cache_calls);
}
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(2, make_cache_calls);
}
}
TEST(CachePoolTest, GetCacheNonEmptyKeyNoReferences) {
auto pool = CachePool::Make(CachePool::Limits{});
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
{
auto pool2 = pool;
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->strong_references_.load());
}
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->caches_.size());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, test_cache1->use_count());
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(0, GetPoolImpl(pool)->caches_.size());
}
TEST(CachePoolTest, StrongToWeakToStrong) {
CachePool::StrongPtr strong_ptr = CachePool::Make({});
CachePool::WeakPtr weak_ptr(strong_ptr);
strong_ptr = CachePool::StrongPtr();
strong_ptr = CachePool::StrongPtr(weak_ptr);
weak_ptr = CachePool::WeakPtr();
}
class NamedOrAnonymousCacheTest : public ::testing::TestWithParam<const char*> {
public:
std::shared_ptr<TestCache::RequestLog> log =
std::make_shared<TestCache::RequestLog>();
std::string cache_key = GetParam();
CachePtr<TestCache> GetCache(const CachePool::StrongPtr& pool) {
return GetTestCache(pool.get(), cache_key, log);
}
};
INSTANTIATE_TEST_SUITE_P(WithoutCacheKey, NamedOrAnonymousCacheTest,
::testing::Values(""));
INSTANTIATE_TEST_SUITE_P(WithCacheKey, NamedOrAnonymousCacheTest,
::testing::Values("k"));
TEST_P(NamedOrAnonymousCacheTest, CacheEntryKeepsCacheAlive) {
{
PinnedCacheEntry<TestCache> entry;
{
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(cache_key));
entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
}
EXPECT_EQ(1, GetOwningCache(*entry).use_count());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST_P(NamedOrAnonymousCacheTest, GetWithImmediateEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_EQ(1, test_cache->use_count());
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_EQ(2, test_cache->use_count());
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
EXPECT_EQ(1, e->use_count());
{
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(2, test_cache->use_count());
EXPECT_EQ(2, e2->use_count());
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, e->use_count());
EXPECT_EQ(2, test_cache->use_count());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_EQ(1, test_cache->use_count());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
EXPECT_EQ("", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair(cache_key, "a"), Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TEST_P(NamedOrAnonymousCacheTest, GetWithoutImmediateEvict) {
{
auto pool = CachePool::Make(kSmallCacheLimits);
auto test_cache = GetCache(pool);
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e1->data);
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
e2->data = "value2";
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value", e1->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value2", e2->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a"), Pair(cache_key, "b")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST(CacheTest, NamedGetWithoutImmediateEvict) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeThenEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
entry->data = "a";
entry->ChangeSize(5000);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeNoEvict) {
CachePool::Limits limits;
limits.total_bytes_limit = 10000;
auto pool = CachePool::Make(limits);
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
entry->data = "a";
entry->ChangeSize(1);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto entry = GetCacheEntry(test_cache, "b");
entry->data = "b";
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("a", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
GetCacheEntry(test_cache, "c")->data = "c";
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("c", GetCacheEntry(test_cache, "c")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
}
TEST(CacheTest, CacheDependsOnOtherCache) {
class CacheA : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
};
class CacheB : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
CachePtr<CacheA> cache_a;
};
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache_a = GetCache<CacheA>(pool.get(), "x",
[&] { return std::make_unique<CacheA>(); });
auto cache_b = GetCache<CacheB>(pool.get(), "x",
[&] { return std::make_unique<CacheB>(); });
GetCacheEntry(cache_b, "key");
cache_b->cache_a = cache_a;
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool,
{cache_a.get(), cache_b.get()});
}
constexpr static int kDefaultIterations = 100;
TEST(CacheTest, ConcurrentGetCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, cache->use_count());
for (auto& e : pinned_entries) {
e.reset();
}
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
},
[&] { pinned_entries[0] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[1] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[2] = GetCacheEntry(cache, "a"); });
}
TEST(CacheTest, ConcurrentGetCacheEntryWeakReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
TestConcurrent(
kDefaultIterations,
[&] { entry = GetCacheEntry(cache, "a"); },
[&] {},
[&] { entry->AcquireWeakReference(); },
[&] { entry->AcquireWeakReference(); });
}
TEST(CacheTest,
ConcurrentDestroyStrongAndWeakCacheEntryReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
WeakPinnedCacheEntry weak_ref;
TestConcurrent(
kDefaultIterations,
[&] {
entry = GetCacheEntry(cache, "a");
weak_ref = entry->AcquireWeakReference();
},
[&] {},
[&] { entry = {}; }, [&] { weak_ref = {}; });
}
TEST(CacheTest, ConcurrentGetCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {caches[0].get(), caches[1].get(), caches[2].get()});
size_t use_count = 3;
for (auto& cache : caches) {
EXPECT_EQ(use_count, cache->use_count());
cache.reset();
--use_count;
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0] = GetTestCache(pool.get(), "cache"); },
[&] { caches[1] = GetTestCache(pool.get(), "cache"); },
[&] { caches[2] = GetTestCache(pool.get(), "cache"); });
}
TEST(CacheTest, ConcurrentReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
for (auto& cache : caches) {
cache = GetTestCache(pool.get(), "cache");
}
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0].reset(); }, [&] { caches[1].reset(); },
[&] { caches[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "cache");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
for (auto& e : pinned_entries) {
e = GetCacheEntry(cache, "a");
}
EXPECT_EQ(2, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] { pinned_entries[0].reset(); }, [&] { pinned_entries[1].reset(); },
[&] { pinned_entries[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
const auto concurrent_op = [&] {
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentDestroyCacheEvictEntries) {
CachePool::Limits limits = {};
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "");
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, EvictEntryDestroyCache) {
auto log = std::make_shared<TestCache::RequestLog>();
CachePool::Limits limits;
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
auto cache_b = GetTestCache(pool.get(), "cache_b", log);
{
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_b", "cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_b", "cache_a"));
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
ASSERT_EQ("entry_a", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
auto entry_b = GetCacheEntry(cache_b, "entry_b");
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache_a", "entry_a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a"));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache_b.get()});
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_EQ("", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
}
TEST(CacheTest, CachePoolWeakPtr) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_ |
658 | cpp | google/tensorstore | fixed_token_auth_provider | tensorstore/internal/oauth2/fixed_token_auth_provider.cc | tensorstore/internal/oauth2/fixed_token_auth_provider_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_FIXED_TOKEN_AUTH_PROVIDER_H_
#define TENSORSTORE_INTERNAL_OAUTH2_FIXED_TOKEN_AUTH_PROVIDER_H_
#include <string>
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
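// AuthProvider that always returns the same caller-supplied bearer token,
// with an expiration of `absl::InfiniteFuture()` so it is never refreshed.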
class FixedTokenAuthProvider : public AuthProvider {
public:
~FixedTokenAuthProvider() override = default;
FixedTokenAuthProvider(std::string token);
Result<BearerTokenWithExpiration> GetToken() override;
private:
std::string token_;
};
}
}
#endif
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "absl/time/time.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
FixedTokenAuthProvider::FixedTokenAuthProvider(std::string token)
: token_(token) {}
Result<BearerTokenWithExpiration> FixedTokenAuthProvider::GetToken() {
return BearerTokenWithExpiration{token_, absl::InfiniteFuture()};
}
}
} | #include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_oauth2::FixedTokenAuthProvider;
TEST(FixedTokenAuthProvider, Minimal) {
FixedTokenAuthProvider auth("token");
auto result = auth.GetToken();
EXPECT_TRUE(result.ok());
EXPECT_EQ("token", result->token);
EXPECT_LT(absl::Now(), result->expiration);
}
} |
659 | cpp | google/tensorstore | google_service_account_auth_provider | tensorstore/internal/oauth2/google_service_account_auth_provider.cc | tensorstore/internal/oauth2/google_service_account_auth_provider_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_GOOGLE_SERVICE_ACCOUNT_AUTH_PROVIDER_H_
#define TENSORSTORE_INTERNAL_OAUTH2_GOOGLE_SERVICE_ACCOUNT_AUTH_PROVIDER_H_
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
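// AuthProvider that obtains OAuth2 access tokens for a Google service
// account by signing a JWT assertion with the account's private key and
// exchanging it at the OAuth token endpoint.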
class GoogleServiceAccountAuthProvider : public RefreshableAuthProvider {
public:
using AccountCredentials = internal_oauth2::GoogleServiceAccountCredentials;
~GoogleServiceAccountAuthProvider() override = default;
GoogleServiceAccountAuthProvider(
const AccountCredentials& creds,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock = {});
protected:
virtual Result<internal_http::HttpResponse> IssueRequest(
std::string_view method, std::string_view uri, absl::Cord payload);
private:
Result<BearerTokenWithExpiration> Refresh() override;
const AccountCredentials creds_;
std::string uri_;
std::string scope_;
std::shared_ptr<internal_http::HttpTransport> transport_;
};
}
}
#endif
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_oauth2 {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr char kOAuthV4Url[] = "https://www.googleapis.com/oauth2/v4/token";
constexpr char kOAuthScope[] = "https://www.googleapis.com/auth/cloud-platform";
GoogleServiceAccountAuthProvider::GoogleServiceAccountAuthProvider(
const AccountCredentials& creds,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
creds_(creds),
uri_(kOAuthV4Url),
scope_(kOAuthScope),
transport_(std::move(transport)) {}
Result<HttpResponse> GoogleServiceAccountAuthProvider::IssueRequest(
std::string_view method, std::string_view uri, absl::Cord payload) {
return transport_
->IssueRequest(
HttpRequestBuilder(method, std::string{uri})
.AddHeader("Content-Type: application/x-www-form-urlencoded")
.BuildRequest(),
internal_http::IssueRequestOptions(std::move(payload)))
.result();
}
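// Builds a JWT signed with the service account's private key and exchanges it
// at the OAuth token endpoint for a short-lived bearer token.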
Result<BearerTokenWithExpiration> GoogleServiceAccountAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto body,
internal_oauth2::BuildSignedJWTRequest(
creds_.private_key,
internal_oauth2::BuildJWTHeader(creds_.private_key_id),
internal_oauth2::BuildJWTClaimBody(creds_.client_email, scope_, uri_,
now, 3600 )));
TENSORSTORE_ASSIGN_OR_RETURN(
auto response, IssueRequest("POST", uri_, absl::Cord(std::move(body))));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider;
using ::tensorstore::internal_oauth2::GoogleServiceAccountCredentials;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
const GoogleServiceAccountCredentials kCreds{
"a1a111aa1111a11a11a11aa111a111a1a1111111",
GetFakePrivateKey(),
"https:
"[email protected]",
};
constexpr char kBody[] =
"grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
"assertion="
"eyJhbGciOiJSUzI1NiIsImtpZCI6ImExYTExMWFhMTExMWExMWExMWExMWFhMTExYTExMWExYT"
"ExMTExMTEiLCJ0eXAiOiJKV1QifQ."
"eyJhdWQiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjQvdG9rZW4iLCJleH"
"AiOjE1NDc2Njk3MDMsImlhdCI6MTU0NzY2NjEwMywiaXNzIjoiZm9vLWVtYWlsQGZvby1wcm9q"
"ZWN0LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2NvcGUiOiJodHRwczovL3d3dy5nb29nbG"
"VhcGlzLmNvbS9hdXRoL2Nsb3VkLXBsYXRmb3JtIn0.gvM1sjnFXwQkBTTqobnTJqE8ZCrAR-"
"SEevEZB4Quqxd836v7iHjnWBiOkUCZl_o5wQouz5pFuhkQ1BlhhAZNih_Ko2yxBi0W_NuhI-"
"18We8gSMhi8pwfNu6WqNqXkHlQAJebhJQH23yP_A2dxU3Z50maUJaAl9G0e60CIynsaeW-"
"o7QneaPxPEWjOi--XMvkOu-z8eD0CXx1dUrlzINDxWzJFoXzCk2_NZ9-"
"UPzHWai68qKo2FjbtTT3fEPA-L1IN908OWhuN2UHdvPrg_"
"h13GO7kY3K7TsWotsgsLon2KxWYaDpasaY_ZqCIXCeS4jW89gVtsOB3E6B-xdR1Gq-9g";
class TestAuthProvider : public GoogleServiceAccountAuthProvider {
public:
TestAuthProvider(const GoogleServiceAccountCredentials& creds)
: GoogleServiceAccountAuthProvider(creds, nullptr,
[this] { return this->time; }),
time(absl::FromUnixSeconds(1547666103)),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord body) {
request.push_back(std::make_pair(std::string(uri), std::string(body)));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::pair<std::string, std::string>> request;
};
TEST(GoogleServiceAccountAuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c", "d"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GoogleServiceAccountAuthProviderTest, BadKeys) {
TestAuthProvider auth({"a", "b", "c", "d"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
EXPECT_EQ(0, auth.request.size());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth(kCreds);
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(kBody, auth.request[0].second);
}
TEST(GoogleServiceAccountAuthProviderTest, Status200) {
TestAuthProvider auth(kCreds);
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(1, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(2, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} |
660 | cpp | google/tensorstore | gce_auth_provider | tensorstore/internal/oauth2/gce_auth_provider.cc | tensorstore/internal/oauth2/gce_auth_provider_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_GCE_AUTH_PROVIDER_H_
#define TENSORSTORE_INTERNAL_OAUTH2_GCE_AUTH_PROVIDER_H_
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
std::string GceMetadataHostname();
class GceAuthProvider : public RefreshableAuthProvider {
public:
struct ServiceAccountInfo {
std::string email;
std::vector<std::string> scopes;
};
~GceAuthProvider() override = default;
GceAuthProvider(std::shared_ptr<internal_http::HttpTransport> transport,
const ServiceAccountInfo& service_account_info,
std::function<absl::Time()> clock = {});
static Result<ServiceAccountInfo> GetDefaultServiceAccountInfoIfRunningOnGce(
internal_http::HttpTransport* transport);
protected:
virtual Result<internal_http::HttpResponse> IssueRequest(std::string path,
bool recursive);
private:
Result<BearerTokenWithExpiration> Refresh() override;
absl::Status RetrieveServiceAccountInfo();
std::string service_account_email_;
std::set<std::string> scopes_;
std::shared_ptr<internal_http::HttpTransport> transport_;
};
}
}
#endif
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <functional>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
ABSL_FLAG(std::optional<std::string>, tensorstore_gce_metadata_root,
std::nullopt,
"Url to used for http access metadata.google.internal. "
"Overrides GCE_METADATA_ROOT.");
namespace tensorstore {
namespace internal_oauth2 {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr static auto ServiceAccountInfoBinder = jb::Object(
jb::Member("email",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::email,
jb::NonEmptyStringBinder)),
jb::Member("scopes",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::scopes)),
jb::DiscardExtraMembers);
}
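// Returns the metadata-server hostname, honoring the
// --tensorstore_gce_metadata_root flag or the GCE_METADATA_ROOT environment
// variable before falling back to "metadata.google.internal".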
std::string GceMetadataHostname() {
return GetFlagOrEnvValue(FLAGS_tensorstore_gce_metadata_root,
"GCE_METADATA_ROOT")
.value_or("metadata.google.internal");
}
GceAuthProvider::GceAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport,
const ServiceAccountInfo& service_account_info,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
service_account_email_(service_account_info.email),
scopes_(service_account_info.scopes.begin(),
service_account_info.scopes.end()),
transport_(std::move(transport)) {}
Result<HttpResponse> GceAuthProvider::IssueRequest(std::string path,
bool recursive) {
HttpRequestBuilder request_builder(
"GET", internal::JoinPath("http:
request_builder.AddHeader("Metadata-Flavor: Google");
if (recursive) {
request_builder.AddQueryParameter("recursive", "true");
}
return transport_->IssueRequest(request_builder.BuildRequest(), {}).result();
}
Result<GceAuthProvider::ServiceAccountInfo>
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
internal_http::HttpTransport* transport) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
transport
->IssueRequest(
HttpRequestBuilder(
"GET",
internal::JoinPath(
"http:
"/computeMetadata/v1/instance/service-accounts/default/"))
.AddHeader("Metadata-Flavor: Google")
.AddQueryParameter("recursive", "true")
.BuildRequest(),
{})
.result());
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
auto info_response = internal::ParseJson(response.payload.Flatten());
if (info_response.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Failed to parse service account response: ",
response.payload.Flatten()));
}
return jb::FromJson<ServiceAccountInfo>(info_response,
ServiceAccountInfoBinder);
}
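// Refresh() requests a fresh access token for the configured service account
// from the GCE metadata server and converts the OAuth response into a
// BearerTokenWithExpiration.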
Result<BearerTokenWithExpiration> GceAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
IssueRequest(
tensorstore::StrCat("/computeMetadata/v1/instance/service-accounts/",
service_account_email_, "/token"),
false));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GceAuthProvider;
const char kOAuthResponse[] = R"(
{
"token_type" : "refresh",
"access_token": "abc",
"expires_in": 456
}
)";
class TestAuthProvider : public GceAuthProvider {
public:
TestAuthProvider()
: GceAuthProvider(nullptr, {"[email protected]", {"abc", "xyz"}},
[this] { return this->time; }),
time(absl::Now()),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string path, bool recursive) {
request.emplace_back(std::move(path));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::string> request;
};
TEST(GceAuthProviderTest, InitialState) {
TestAuthProvider auth;
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GceAuthProviderTest, Status200) {
TestAuthProvider auth;
auth.responses = {
{0, {200, absl::Cord(kOAuthResponse), {}}},
{1, {200, absl::Cord(kOAuthResponse), {}}},
};
EXPECT_FALSE(auth.IsValid());
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
TEST(GceAuthProviderTest, NoResponse) {
TestAuthProvider auth;
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ(
"/computeMetadata/v1/instance/service-accounts/[email protected]/token",
auth.request[0]);
}
TEST(GceAuthProviderTest, Status400) {
TestAuthProvider auth;
auth.responses = {
{0, {400, absl::Cord(kOAuthResponse), {}}},
};
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_FALSE(result.ok()) << result.status();
}
TEST(GceAuthProviderTest, Hostname) {
EXPECT_EQ("metadata.google.internal",
tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::SetEnv("GCE_METADATA_ROOT", "localhost");
EXPECT_EQ("localhost", tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::UnsetEnv("GCE_METADATA_ROOT");
}
} |
661 | cpp | google/tensorstore | google_auth_provider | tensorstore/internal/oauth2/google_auth_provider.cc | tensorstore/internal/oauth2/google_auth_provider_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_GOOGLE_AUTH_PROVIDER_H_
#define TENSORSTORE_INTERNAL_OAUTH2_GOOGLE_AUTH_PROVIDER_H_
#include <functional>
#include <memory>
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
Result<std::unique_ptr<AuthProvider>> GetGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport =
internal_http::GetDefaultHttpTransport());
Result<std::shared_ptr<AuthProvider>> GetSharedGoogleAuthProvider();
void ResetSharedGoogleAuthProvider();
using GoogleAuthProvider =
std::function<Result<std::unique_ptr<AuthProvider>>()>;
void RegisterGoogleAuthProvider(GoogleAuthProvider provider, int priority);
}
}
#endif
#include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <algorithm>
#include <fstream>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr char kOAuthV3Url[] = "https://www.googleapis.com/oauth2/v3/token";
bool IsFile(const std::string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
Result<std::string> GetEnvironmentVariableFileName() {
auto env = GetEnv(kGoogleApplicationCredentials);
if (!env || !IsFile(*env)) {
return absl::NotFoundError(tensorstore::StrCat(
"$", kGoogleApplicationCredentials, " is not set or corrupt."));
}
return *env;
}
Result<std::string> GetWellKnownFileName() {
std::string result;
auto config_dir_override = GetEnv(kCloudSdkConfig);
if (config_dir_override) {
result = JoinPath(*config_dir_override, kWellKnownCredentialsFile);
} else {
auto home_dir = GetEnv("HOME");
if (!home_dir) {
return absl::NotFoundError("Could not read $HOME.");
}
result =
JoinPath(*home_dir, kGCloudConfigFolder, kWellKnownCredentialsFile);
}
if (!IsFile(result)) {
return absl::NotFoundError(
tensorstore::StrCat("Could not find the credentials file in the "
"standard gcloud location [",
result, "]"));
}
return result;
}
struct AuthProviderRegistry {
std::vector<std::pair<int, GoogleAuthProvider>> providers;
absl::Mutex mutex;
};
AuthProviderRegistry& GetGoogleAuthProviderRegistry() {
static absl::NoDestructor<AuthProviderRegistry> registry;
return *registry;
}
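// Resolves default credentials in order: the GOOGLE_AUTH_TOKEN_FOR_TESTING
// environment variable, a JSON credentials file (named by
// $GOOGLE_APPLICATION_CREDENTIALS or found in the well-known gcloud
// location), and finally the GCE metadata server when running on GCE.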
Result<std::unique_ptr<AuthProvider>> GetDefaultGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
std::unique_ptr<AuthProvider> result;
auto var = GetEnv(kGoogleAuthTokenForTesting);
if (var) {
ABSL_LOG(INFO) << "Using GOOGLE_AUTH_TOKEN_FOR_TESTING";
result.reset(new FixedTokenAuthProvider(std::move(*var)));
return std::move(result);
}
absl::Status status;
auto credentials_filename = GetEnvironmentVariableFileName();
if (!credentials_filename) {
credentials_filename = GetWellKnownFileName();
}
if (credentials_filename.ok()) {
ABSL_LOG(INFO) << "Using credentials at " << *credentials_filename;
std::ifstream credentials_fstream(*credentials_filename);
auto json = ::nlohmann::json::parse(credentials_fstream, nullptr, false);
auto refresh_token = internal_oauth2::ParseRefreshToken(json);
if (refresh_token.ok()) {
ABSL_LOG(INFO) << "Using OAuth2 AuthProvider";
result.reset(new OAuth2AuthProvider(*refresh_token, kOAuthV3Url,
std::move(transport)));
return std::move(result);
}
auto service_account =
internal_oauth2::ParseGoogleServiceAccountCredentials(json);
if (service_account.ok()) {
ABSL_LOG(INFO) << "Using ServiceAccount AuthProvider";
result.reset(new GoogleServiceAccountAuthProvider(*service_account,
std::move(transport)));
return std::move(result);
}
status = absl::UnknownError(
tensorstore::StrCat("Unexpected content of the JSON credentials file: ",
*credentials_filename));
}
if (auto gce_service_account =
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
transport.get());
gce_service_account.ok()) {
ABSL_LOG(INFO) << "Running on GCE, using service account "
<< gce_service_account->email;
result.reset(
new GceAuthProvider(std::move(transport), *gce_service_account));
return std::move(result);
}
if (!credentials_filename.ok()) {
ABSL_LOG(ERROR)
<< credentials_filename.status().message()
<< ". You may specify a credentials file using $"
<< kGoogleApplicationCredentials
<< ", or to use Google application default credentials, run: "
"gcloud auth application-default login";
}
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::NotFoundError(
"Could not locate the credentials file and not running on GCE.");
}
struct SharedGoogleAuthProviderState {
absl::Mutex mutex;
std::optional<Result<std::shared_ptr<AuthProvider>>> auth_provider
ABSL_GUARDED_BY(mutex);
};
SharedGoogleAuthProviderState& GetSharedGoogleAuthProviderState() {
static absl::NoDestructor<SharedGoogleAuthProviderState> state;
return *state;
}
}
void RegisterGoogleAuthProvider(GoogleAuthProvider provider, int priority) {
auto& registry = GetGoogleAuthProviderRegistry();
absl::WriterMutexLock lock(®istry.mutex);
registry.providers.emplace_back(priority, std::move(provider));
std::sort(registry.providers.begin(), registry.providers.end(),
[](const auto& a, const auto& b) { return a.first < b.first; });
}
Result<std::unique_ptr<AuthProvider>> GetGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
{
auto& registry = GetGoogleAuthProviderRegistry();
absl::ReaderMutexLock lock(®istry.mutex);
for (const auto& provider : registry.providers) {
auto auth_result = provider.second();
if (auth_result.ok()) return auth_result;
}
}
return internal_oauth2::GetDefaultGoogleAuthProvider(std::move(transport));
}
Result<std::shared_ptr<AuthProvider>> GetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
if (!state.auth_provider) {
state.auth_provider.emplace(GetGoogleAuthProvider());
}
return *state.auth_provider;
}
void ResetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
state.auth_provider = std::nullopt;
}
}
} | #include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_test_utils.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
using ::tensorstore::internal_oauth2::AuthProvider;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GetGoogleAuthProvider;
using ::tensorstore::internal_oauth2::GoogleAuthTestScope;
class TestData
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteApplicationDefaultCredentials() {
auto p = JoinPath(path(), "application_default_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"client_id": "fake-client-id.apps.googleusercontent.com",
"client_secret": "fake-client-secret",
"refresh_token": "fake-refresh-token",
"type": "authorized_user"
})";
return p;
}
std::string WriteServiceAccountCredentials() {
auto p = JoinPath(path(), "service_account_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"type": "service_account",
"project_id": "fake_project_id",
"private_key_id": "fake_key_id",
"client_email": "fake-test-project.iam.gserviceaccount.com",
"client_id": "fake_client_id",
"auth_uri": "https:
"token_uri": "https:
"auth_provider_x509_cert_url": "https:
"client_x509_cert_url": "https:
)";
ofs << " \"private_key\": \"" << absl::CEscape(GetFakePrivateKey())
<< "\" }";
return p;
}
};
class MetadataMockTransport : public HttpTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override {
ApplyResponseToHandler(
[&]() -> tensorstore::Result<HttpResponse> {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (!absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal")) {
return absl::UnimplementedError("Mock cannot satisfy the request.");
}
constexpr char kOAuthPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/[email protected]/token";
if (absl::StartsWith(parsed.authority_and_path, kOAuthPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "token_type" : "refresh", "access_token": "abc", "expires_in": 3600 })")};
}
constexpr char kServiceAccountPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/default/";
if (absl::StartsWith(parsed.authority_and_path,
kServiceAccountPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "email": "[email protected]", "scopes": [ "test" ] })")};
}
return HttpResponse{200, absl::Cord()};
}(),
response_handler);
}
void set_has_service_account(bool has_service_account) {
has_service_account_ = has_service_account;
}
bool has_service_account_ = false;
};
class GoogleAuthProviderTest : public ::testing::Test {
public:
GoogleAuthTestScope google_auth_test_scope;
static void SetUpTestSuite() {
SetDefaultHttpTransport(mock_transport);
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
}
static void TearDownTestSuite() {
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
SetDefaultHttpTransport(nullptr);
}
static std::shared_ptr<MetadataMockTransport> mock_transport;
};
std::shared_ptr<MetadataMockTransport> GoogleAuthProviderTest::mock_transport =
std::make_shared<MetadataMockTransport>();
TEST_F(GoogleAuthProviderTest, Invalid) {
SetEnv("GCE_METADATA_ROOT", "invalidmetadata.google.internal");
auto auth_provider = GetGoogleAuthProvider();
EXPECT_FALSE(auth_provider.ok());
UnsetEnv("GCE_METADATA_ROOT");
}
TEST_F(GoogleAuthProviderTest, AuthTokenForTesting) {
SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::FixedTokenAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
std::unique_ptr<AuthProvider> auth = std::move(*auth_provider);
auto token = auth->GetToken();
ASSERT_TRUE(token.ok());
EXPECT_EQ("abc", token->token);
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentialsFromSDKConfig) {
TestData test_data;
test_data.WriteServiceAccountCredentials();
test_data.WriteApplicationDefaultCredentials();
SetEnv("CLOUDSDK_CONFIG", test_data.path().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteApplicationDefaultCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleServiceAccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteServiceAccountCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance = dynamic_cast<
tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GceWithServiceAccount) {
mock_transport->set_has_service_account(true);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto auth_provider, GetGoogleAuthProvider());
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::GceAuthProvider*>(
auth_provider.get());
EXPECT_FALSE(instance == nullptr);
}
EXPECT_THAT(auth_provider->GetAuthHeader(),
::testing::Optional(std::string("Authorization: Bearer abc")));
}
TEST_F(GoogleAuthProviderTest, GceWithoutServiceAccount) {
mock_transport->set_has_service_account(false);
EXPECT_THAT(GetGoogleAuthProvider(),
tensorstore::MatchesStatus(absl::StatusCode::kNotFound));
}
} |
662 | cpp | google/tensorstore | oauth_utils | tensorstore/internal/oauth2/oauth_utils.cc | tensorstore/internal/oauth2/oauth_utils_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_OAUTH_UTILS_H_
#define TENSORSTORE_INTERNAL_OAUTH2_OAUTH_UTILS_H_
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
Result<std::string> SignWithRSA256(std::string_view private_key,
std::string_view to_sign);
std::string BuildJWTHeader(std::string_view key_id);
std::string BuildJWTClaimBody(std::string_view client_email,
std::string_view scope,
std::string_view audience, absl::Time now,
int64_t lifetime = 3600);
Result<std::string> BuildSignedJWTRequest(std::string_view private_key,
std::string_view header,
std::string_view body);
struct GoogleServiceAccountCredentials {
std::string private_key_id;
std::string private_key;
std::string token_uri;
std::string client_email;
};
Result<GoogleServiceAccountCredentials>
ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials);
Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials(
std::string_view source);
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>,
Result<GoogleServiceAccountCredentials>>
ParseGoogleServiceAccountCredentials(const T& json) {
return ParseGoogleServiceAccountCredentialsImpl(json);
}
struct RefreshToken {
std::string client_id;
std::string client_secret;
std::string refresh_token;
};
Result<RefreshToken> ParseRefreshTokenImpl(const ::nlohmann::json& credentials);
Result<RefreshToken> ParseRefreshToken(std::string_view source);
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>, Result<RefreshToken>>
ParseRefreshToken(const T& json) {
return ParseRefreshTokenImpl(json);
}
struct OAuthResponse {
int64_t expires_in;
std::string token_type;
std::string access_token;
};
Result<OAuthResponse> ParseOAuthResponseImpl(
const ::nlohmann::json& credentials);
Result<OAuthResponse> ParseOAuthResponse(std::string_view source);
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>, Result<OAuthResponse>>
ParseOAuthResponse(const T& json) {
return ParseOAuthResponseImpl(json);
}
struct ErrorResponse {
std::string error;
std::string error_description;
std::string error_uri;
std::string error_subtype;
};
Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error);
}
}
#endif
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include <stddef.h>
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/time/time.h"
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace {
constexpr char kCryptoAlgorithm[] = "RS256";
constexpr char kJwtType[] = "JWT";
constexpr char kGrantType[] =
"urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer";
}
namespace internal_oauth2 {
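// Signs `to_sign` with the PEM-encoded RSA private key using SHA-256 and
// returns the signature encoded as web-safe base64.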
Result<std::string> SignWithRSA256(std::string_view private_key,
std::string_view to_sign) {
if (private_key.empty()) {
return absl::InternalError("No private key provided.");
}
const auto md = EVP_sha256();
assert(md != nullptr);
auto md_ctx = std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)>(
EVP_MD_CTX_create(), &EVP_MD_CTX_free);
assert(md_ctx != nullptr);
auto pem_buffer = std::unique_ptr<BIO, decltype(&BIO_free)>(
BIO_new_mem_buf(static_cast<const char*>(private_key.data()),
static_cast<int>(private_key.length())),
&BIO_free);
if (!pem_buffer) {
return absl::InternalError("Could not create the PEM buffer.");
}
auto key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
PEM_read_bio_PrivateKey(
static_cast<BIO*>(pem_buffer.get()),
nullptr,
nullptr,
nullptr),
&EVP_PKEY_free);
if (!key) {
return absl::InternalError("Could not load the private key.");
}
if (EVP_DigestSignInit(md_ctx.get(), nullptr, md, nullptr, key.get()) != 1) {
return absl::InternalError("DigestInit failed.");
}
if (EVP_DigestSignUpdate(md_ctx.get(), to_sign.data(), to_sign.size()) != 1) {
return absl::InternalError("DigestUpdate failed.");
}
size_t sig_len = 0;
if (EVP_DigestSignFinal(md_ctx.get(), nullptr, &sig_len) != 1) {
return absl::InternalError("DigestFinal (get signature length) failed.");
}
std::unique_ptr<unsigned char[]> sig(new unsigned char[sig_len]);
if (EVP_DigestSignFinal(md_ctx.get(), sig.get(), &sig_len) != 1) {
return absl::InternalError("DigestFinal (signature compute) failed.");
}
std::string signature;
absl::WebSafeBase64Escape(
std::string_view(reinterpret_cast<char*>(sig.get()), sig_len),
&signature);
return std::move(signature);
}
std::string BuildJWTHeader(std::string_view key_id) {
::nlohmann::json assertion_header = {
{"alg", kCryptoAlgorithm},
{"typ", kJwtType},
{"kid", std::string(key_id)},
};
std::string encoded_header;
absl::WebSafeBase64Escape(assertion_header.dump(), &encoded_header);
return encoded_header;
}
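// Builds the web-safe base64-encoded JWT claim set (iss, scope, aud, iat,
// exp) used when requesting a service-account access token.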
std::string BuildJWTClaimBody(std::string_view client_email,
std::string_view scope,
std::string_view audience, absl::Time now,
std::int64_t lifetime) {
const std::int64_t request_timestamp_sec = absl::ToUnixSeconds(now);
const std::int64_t expiration_timestamp_sec =
request_timestamp_sec + lifetime;
::nlohmann::json assertion_payload = {
{"iss", std::string(client_email)}, {"scope", std::string(scope)},
{"aud", std::string(audience)}, {"iat", request_timestamp_sec},
{"exp", expiration_timestamp_sec},
};
std::string encoded_payload;
absl::WebSafeBase64Escape(assertion_payload.dump(), &encoded_payload);
return encoded_payload;
}
Result<std::string> BuildSignedJWTRequest(std::string_view private_key,
std::string_view header,
std::string_view body) {
auto claim = tensorstore::StrCat(header, ".", body);
auto result = SignWithRSA256(private_key, claim);
if (!result) {
return result.status();
}
return tensorstore::StrCat("grant_type=", kGrantType, "&assertion=", claim,
".", *result);
}
constexpr static auto ErrorResponseBinder = jb::Object(
jb::Member("error",
jb::Projection(&ErrorResponse::error, jb::NonEmptyStringBinder)),
jb::Member("error_description",
jb::Projection(&ErrorResponse::error_description,
jb::NonEmptyStringBinder)),
jb::Member("error_uri", jb::Projection(&ErrorResponse::error_uri,
jb::NonEmptyStringBinder)),
jb::Member("error_subtype", jb::Projection(&ErrorResponse::error_subtype,
jb::NonEmptyStringBinder)),
jb::DiscardExtraMembers);
Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error) {
if (error.is_discarded()) {
return absl::InvalidArgumentError("Invalid ErrorResponse");
}
return jb::FromJson<ErrorResponse>(error, ErrorResponseBinder);
}
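// Binder for service-account JSON keys: private_key, private_key_id, and
// client_email must be non-empty; token_uri is optional and extra members
// are ignored.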
constexpr static auto GoogleServiceAccountCredentialsBinder = jb::Object(
jb::Member("private_key",
jb::Projection(&GoogleServiceAccountCredentials::private_key,
jb::NonEmptyStringBinder)),
jb::Member("private_key_id",
jb::Projection(&GoogleServiceAccountCredentials::private_key_id,
jb::NonEmptyStringBinder)),
jb::Member("client_email",
jb::Projection(&GoogleServiceAccountCredentials::client_email,
jb::NonEmptyStringBinder)),
jb::Member("token_uri",
jb::Projection(&GoogleServiceAccountCredentials::token_uri,
jb::DefaultInitializedValue())),
jb::DiscardExtraMembers);
Result<GoogleServiceAccountCredentials>
ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::InvalidArgumentError(
"Invalid GoogleServiceAccountCredentials token");
}
auto creds_token = jb::FromJson<GoogleServiceAccountCredentials>(
credentials, GoogleServiceAccountCredentialsBinder);
if (!creds_token.ok()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GoogleServiceAccountCredentials: ", creds_token.status()));
}
return creds_token;
}
Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials(
std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GoogleServiceAccountCredentials: ", source));
}
return ParseGoogleServiceAccountCredentialsImpl(credentials);
}
constexpr static auto RefreshTokenBinder = jb::Object(
jb::Member("client_id", jb::Projection(&RefreshToken::client_id,
jb::NonEmptyStringBinder)),
jb::Member("client_secret", jb::Projection(&RefreshToken::client_secret,
jb::NonEmptyStringBinder)),
jb::Member("refresh_token", jb::Projection(&RefreshToken::refresh_token,
jb::NonEmptyStringBinder)),
jb::DiscardExtraMembers);
Result<RefreshToken> ParseRefreshTokenImpl(
const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::UnauthenticatedError("Invalid RefreshToken token");
}
auto refresh_token =
jb::FromJson<RefreshToken>(credentials, RefreshTokenBinder);
if (!refresh_token.ok()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid RefreshToken: ", credentials.dump()));
}
return refresh_token;
}
Result<RefreshToken> ParseRefreshToken(std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid RefreshToken: ", source));
}
return ParseRefreshTokenImpl(credentials);
}
constexpr static auto OAuthResponseBinder = jb::Object(
jb::Member("token_type", jb::Projection(&OAuthResponse::token_type,
jb::NonEmptyStringBinder)),
jb::Member("access_token", jb::Projection(&OAuthResponse::access_token,
jb::NonEmptyStringBinder)),
jb::Member("expires_in", jb::Projection(&OAuthResponse::expires_in,
jb::LooseInteger<int64_t>(1))),
jb::DiscardExtraMembers);
Result<OAuthResponse> ParseOAuthResponseImpl(
const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::UnauthenticatedError("Invalid OAuthResponse token");
}
auto response_token =
jb::FromJson<OAuthResponse>(credentials, OAuthResponseBinder);
if (!response_token.ok()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid OAuthResponse: ", credentials.dump()));
}
return response_token;
}
Result<OAuthResponse> ParseOAuthResponse(std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid OAuthResponse: ", source));
}
return ParseOAuthResponseImpl(credentials);
}
}
} | #include "tensorstore/internal/oauth2/oauth_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::ParseGoogleServiceAccountCredentials;
using ::tensorstore::internal_oauth2::ParseOAuthResponse;
using ::tensorstore::internal_oauth2::ParseRefreshToken;
std::string GetJsonKeyFileContents() {
constexpr char kJsonKeyfilePrefix[] = R"""({
"type": "service_account",
"project_id": "foo-project",
"private_key_id": "a1a111aa1111a11a11a11aa111a111a1a1111111",
"client_email": "[email protected]",
"client_id": "100000000000000000001",
"auth_uri": "https:
"token_uri": "https:
"auth_provider_x509_cert_url": "https:
"client_x509_cert_url": "https:
)""";
return tensorstore::StrCat(kJsonKeyfilePrefix, " \"private_key\": \"",
absl::CEscape(GetFakePrivateKey()), "\" }");
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentials_Invalid) {
EXPECT_FALSE(ParseGoogleServiceAccountCredentials("{ }").ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "",
"private_key_id": "",
"client_email": "",
"token_uri": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "",
"private_key_id": "abc",
"client_email": "456"
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "",
"client_email": "456"
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456"
"token_uri": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key_id": "abc",
"client_email": "456",
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"client_email": "456",
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
})")
.ok());
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentials) {
auto result = ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456",
"token_uri": "wxy"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().private_key);
EXPECT_EQ("abc", result.value().private_key_id);
EXPECT_EQ("456", result.value().client_email);
EXPECT_EQ("wxy", result.value().token_uri);
result = ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().private_key);
EXPECT_EQ("abc", result.value().private_key_id);
EXPECT_EQ("456", result.value().client_email);
EXPECT_EQ("", result.value().token_uri);
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentialsFile) {
auto result = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("[email protected]",
result->client_email);
}
TEST(OAuthUtilTest, ParseRefreshToken_Invalid) {
EXPECT_FALSE(ParseRefreshToken("{ }").ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "",
"client_secret": "",
"refresh_token": ""
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "",
"client_secret": "abc",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": ""
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": 456
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_secret": "abc",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"json({
"error": "invalid_grant",
"error_description": "reauth related error (invalid_rapt)",
"error_uri": "https:
"error_subtype": "invalid_rapt"
})json")
.ok());
}
TEST(OAuthUtilTest, ParseRefreshToken) {
auto result = ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": "456"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().client_id);
EXPECT_EQ("abc", result.value().client_secret);
EXPECT_EQ("456", result.value().refresh_token);
}
TEST(OAuthUtilTest, ParseOAuthResponse_Invalid) {
EXPECT_FALSE(ParseOAuthResponse("{ }").ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "",
"access_token": "abc",
"expires_in": 456
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "123",
"access_token": "",
"expires_in": 456
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "123",
"access_token": "abc",
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"error": "invalid_grant",
"error_description": "reauth related error (invalid_rapt)",
"error_uri": "https:
"error_subtype": "invalid_rapt"
})json")
.ok());
}
TEST(OAuthUtilTest, ParseOAuthResponse) {
EXPECT_TRUE(ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": "456"
})")
.ok());
auto result = ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().token_type);
EXPECT_EQ("abc", result.value().access_token);
EXPECT_EQ(456, result.value().expires_in);
result = ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456,
"extra_fields": "are ignored"
})");
ASSERT_TRUE(result.ok()) << result.status();
}
TEST(OAuthUtilTest, BuildJWTClaimTest) {
using ::tensorstore::internal_oauth2::BuildJWTClaimBody;
using ::tensorstore::internal_oauth2::BuildJWTHeader;
EXPECT_EQ("eyJhbGciOiJSUzI1NiIsImtpZCI6ImEiLCJ0eXAiOiJKV1QifQ",
BuildJWTHeader("a"));
EXPECT_EQ(
"eyJhdWQiOiI0IiwiZXhwIjoxNTQ3NjY5NzAzLCJpYXQiOjE1NDc2NjYxMDMsImlzcyI6ImIi"
"LCJzY29wZSI6ImMifQ",
BuildJWTClaimBody("b", "c", "4", absl::FromUnixSeconds(1547666103),
3600));
}
TEST(OAuthUtilTest, Sign) {
using ::tensorstore::internal_oauth2::SignWithRSA256;
{
auto result = SignWithRSA256("", "something");
EXPECT_FALSE(result.ok());
}
{
constexpr char kBadKey[] =
"-----BEGIN PRIVATE KEY-----\n"
"Z23x2ZUyar6i0BQ8eJFAEN+IiUapEeCVazuxJSt4RjYfwSa/"
"p117jdZGEWD0GxMC\nlUtj+/nH3HDQjM4ltYfTPUg=\n"
"-----END PRIVATE KEY-----\n";
auto result = SignWithRSA256(kBadKey, "something");
EXPECT_FALSE(result.ok());
}
auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(creds.ok());
{
auto result = SignWithRSA256(creds->private_key, "something");
ASSERT_TRUE(result.ok());
EXPECT_EQ(
"A-sH4BVqtxu-6LECWJCb0VKGDj46pnpBpZB1KViuhG2CwugRVR6V3-"
"w8eBvAUbIRewSnXp_lWkxdy_rZBMau9VuILnLOC0t692-"
"L8WEqHsoFYBWvTZGCT5XkslVXhxt4d8jgM6U_8If4Cf3fGA4XAxpP-pyrbPGz-"
"VXn6R7jcLGOLsFtcuAXpJ9zkwYE72pGUtI_hiU-"
"tquIEayOQW9frXJlxt2oR4ld1l3p0FWibkNY8OfYPdTlRS0WcsgpWngTamHEBplJ5xNLD5"
"Ye5bG1DFqBJn0evxW0btbcfKCYuyirvgvHPsTt-"
"YMcPGo1xtlhT5c4ycEHOObFUGDpKPjljw",
*result);
}
}
TEST(OAuthUtilTest, BuildJWTRequestBody) {
using ::tensorstore::internal_oauth2::BuildSignedJWTRequest;
auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(creds.ok());
auto result =
BuildSignedJWTRequest(creds->private_key, "header", "something");
ASSERT_TRUE(result.ok());
EXPECT_EQ(
"grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
"assertion=header.something.LyvY9ZVG6tL34g5Wji--3G5JGQP-"
"fza47yBQIrRHJqecVUTVGuEXti_deBjSbB36gvpBOE67-U9h1wgD2VR_"
"MDx8JaQHGct04gVZdKC7m4uqu5lI8u0jqXGG4UbRwfUMZ0UCjxJfyUbg6KUR7iyiqoH5szZv"
"31rJISnM4RQvH-lQFrE6BuXpvB09Hve4T3q5mtq7E9pd5rXz_"
"vlqL5ib5tkdBEg2cbydDZHeCx5uA9qcg3hGidrU1fLgreFKu3dSvzu4qFZL3-"
"0Pnt4XMqwslx2vBbFQB7_K8Dnz10F1TA5njOvwFRWNjKM1I0cRZ5N3O1CnGv1wyAz-"
"FIcKdk5_7Q",
*result);
}
} |
663 | cpp | google/tensorstore | oauth2_auth_provider | tensorstore/internal/oauth2/oauth2_auth_provider.cc | tensorstore/internal/oauth2/oauth2_auth_provider_test.cc | #ifndef TENSORSTORE_INTERNAL_OAUTH2_OAUTH2_AUTH_PROVIDER_H_
#define TENSORSTORE_INTERNAL_OAUTH2_OAUTH2_AUTH_PROVIDER_H_
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
class OAuth2AuthProvider : public RefreshableAuthProvider {
public:
using RefreshToken = internal_oauth2::RefreshToken;
~OAuth2AuthProvider() override = default;
OAuth2AuthProvider(const RefreshToken& creds, std::string uri,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock = {});
protected:
virtual Result<internal_http::HttpResponse> IssueRequest(
std::string_view method, std::string_view uri, absl::Cord payload);
private:
Result<BearerTokenWithExpiration> Refresh() override;
std::string refresh_payload_;
std::string uri_;
std::shared_ptr<internal_http::HttpTransport> transport_;
};
}
}
#endif
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
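// Builds the url-encoded refresh_token grant body from the stored OAuth2
// client id, client secret, and refresh token.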
std::string MakePayload(const internal_oauth2::RefreshToken& creds) {
auto client_id = internal::PercentEncodeUriComponent(creds.client_id);
auto client_secret = internal::PercentEncodeUriComponent(creds.client_secret);
auto refresh_token = internal::PercentEncodeUriComponent(creds.refresh_token);
return tensorstore::StrCat(
"grant_type=refresh_token", "&client_id=", client_id,
"&client_secret=", client_secret, "&refresh_token=", refresh_token);
}
}
OAuth2AuthProvider::OAuth2AuthProvider(
const RefreshToken& creds, std::string uri,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
refresh_payload_(MakePayload(creds)),
uri_(std::move(uri)),
transport_(std::move(transport)) {}
Result<HttpResponse> OAuth2AuthProvider::IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord payload) {
return transport_
->IssueRequest(
HttpRequestBuilder(method, std::string{uri}).BuildRequest(),
internal_http::IssueRequestOptions(std::move(payload)))
.result();
}
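// Refresh() POSTs the cached refresh-token payload to the token endpoint and
// converts the OAuth response into a BearerTokenWithExpiration.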
Result<BearerTokenWithExpiration> OAuth2AuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto response, IssueRequest("POST", uri_, absl::Cord(refresh_payload_)));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::OAuth2AuthProvider;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
constexpr char kOAuthV3Url[] = "https://www.googleapis.com/oauth2/v3/token";
class TestAuthProvider : public OAuth2AuthProvider {
public:
TestAuthProvider(const RefreshToken& creds)
: OAuth2AuthProvider(creds, kOAuthV3Url, nullptr,
[this] { return this->time; }),
time(absl::Now()),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord body) {
request.push_back(std::make_pair(std::string(uri), std::string(body)));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::pair<std::string, std::string>> request;
};
TEST(OAuth2AuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth({"a", "b", "c"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
}
TEST(OAuth2AuthProviderTest, Status200) {
TestAuthProvider auth({"a", "b", "c"});
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(2, auth.request.size());
EXPECT_EQ("https:
auth.request[1].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[1].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} |
664 | cpp | google/tensorstore | protobuf | tensorstore/serialization/protobuf.cc | tensorstore/internal/metrics/protobuf_test.cc | #ifndef TENSORSTORE_UTIL_GARBAGE_COLLECTION_PROTOBUF_H_
#define TENSORSTORE_UTIL_GARBAGE_COLLECTION_PROTOBUF_H_
#include <type_traits>
#include "google/protobuf/message_lite.h"
#include "tensorstore/util/garbage_collection/fwd.h"
namespace tensorstore {
namespace garbage_collection {
template <typename T>
struct GarbageCollection<
T, std::enable_if_t<std::is_base_of_v<google::protobuf::MessageLite, T>>> {
constexpr static bool required() { return false; }
};
}
}
#endif
#include "tensorstore/serialization/protobuf.h"
#include "absl/status/status.h"
#include "google/protobuf/message_lite.h"
#include "riegeli/messages/message_parse.h"
#include "riegeli/messages/message_serialize.h"
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
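// Writes the message as a length-prefixed protobuf to the sink's Riegeli
// writer; set_partial(true) allows messages with unset required fields.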
bool ProtobufSerializer::Encode(EncodeSink& sink,
const google::protobuf::MessageLite& value) {
auto status = riegeli::SerializeLengthPrefixedToWriter(
value, sink.writer(), riegeli::SerializeOptions().set_partial(true));
if (!status.ok()) {
sink.Fail(std::move(status));
return false;
}
return true;
}
bool ProtobufSerializer::Decode(DecodeSource& source,
google::protobuf::MessageLite& value) {
auto status = riegeli::ParseLengthPrefixedFromReader(
source.reader(), value, riegeli::ParseOptions().set_partial(true));
if (!status.ok()) {
source.Fail(std::move(status));
return false;
}
return true;
}
}
} | #ifndef TENSORSTORE_METRICS_DISABLED
#include "tensorstore/internal/metrics/protobuf.h"
#include <stdint.h>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metrics.pb.h"
#include "tensorstore/internal/metrics/registry.h"
#include "tensorstore/internal/metrics/value.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::Approximately;
using ::protobuf_matchers::EqualsProto;
using ::protobuf_matchers::IgnoringRepeatedFieldOrdering;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::Counter;
using ::tensorstore::internal_metrics::DefaultBucketer;
using ::tensorstore::internal_metrics::Gauge;
using ::tensorstore::internal_metrics::GetMetricRegistry;
using ::tensorstore::internal_metrics::Histogram;
using ::tensorstore::internal_metrics::Value;
TEST(ProtobufTest, BasicConversion) {
CollectedMetric metric;
metric.metric_name = "abc";
metric.tag = "tag";
metric.values.emplace_back(
CollectedMetric::Value{{"c", "d"}, int64_t{1}, int64_t{2}});
metric.values.emplace_back(CollectedMetric::Value{{"e", "g"}, 2.3, 3.4});
metric.values.emplace_back(CollectedMetric::Value{{}, int64_t{1}});
metric.values.emplace_back(CollectedMetric::Value{{"i"}, 1.2});
metric.values.emplace_back(CollectedMetric::Value{{}, "boo"});
metric.histograms.emplace_back(CollectedMetric::Histogram{
{"h"}, 10, 1, 1, {1, 1, 1, 1, 1}});
tensorstore::metrics_proto::Metric proto;
tensorstore::internal_metrics::CollectedMetricToProto(metric, proto);
EXPECT_THAT(proto,
IgnoringRepeatedFieldOrdering(Approximately(EqualsProto(R"pb(
metric_name: "abc"
tag: "tag"
metadata {}
instance {
field: "c"
field: "d"
int_value { value: 1 max_value: 2 }
}
instance {
field: "e"
field: "g"
double_value { value: 2.3 max_value: 3.4 }
}
instance { int_value { value: 1 } }
instance {
field: "i"
double_value { value: 1.2 }
}
instance { string_value { value: "boo" } }
instance {
field: "h"
histogram {
count: 10
mean: 1
sum_of_squared_deviation: 1
bucket: 1
bucket: 1
bucket: 1
bucket: 1
bucket: 1
}
}
)pb"))));
}
TEST(ProtobufTest, FromRegistry) {
{
auto& counter =
Counter<int64_t>::New("/protobuf_test/counter1", "A metric");
counter.Increment();
counter.IncrementBy(2);
}
{
auto& counter = Counter<double>::New("/protobuf_test/counter2", "A metric");
counter.Increment();
counter.IncrementBy(2);
}
{
auto& counter = Counter<int64_t, std::string>::New(
"/protobuf_test/counter3", "field1", "A metric");
counter.Increment("a");
counter.IncrementBy(2, "b");
}
{
auto& counter = Counter<double, int>::New("/protobuf_test/counter4",
"field1", "A metric");
counter.Increment(1);
counter.IncrementBy(2, 2);
}
{
auto& gauge = Gauge<int64_t>::New("/protobuf_test/gauge1", "A metric");
gauge.Set(3);
gauge.Increment();
gauge.IncrementBy(2);
}
{
auto& gauge = Gauge<double>::New("/protobuf_test/gauge2", "A metric");
gauge.Set(3);
gauge.Increment();
gauge.IncrementBy(2);
}
{
auto& gauge = Gauge<int64_t, std::string>::New("/protobuf_test/gauge3",
"field1", "A metric");
gauge.Increment("a");
gauge.IncrementBy(2, "a");
gauge.Set(3, "b");
}
{
auto& gauge =
Gauge<double, bool>::New("/protobuf_test/gauge4", "field1", "A metric");
gauge.Increment(false);
gauge.IncrementBy(2, false);
gauge.Set(3, true);
}
{
auto& histogram =
Histogram<DefaultBucketer>::New("/protobuf_test/hist1", "A metric");
histogram.Observe(1);
histogram.Observe(2);
histogram.Observe(1000);
}
{
auto& histogram = Histogram<DefaultBucketer, int>::New(
"/protobuf_test/hist2", "field1", "A metric");
histogram.Observe(-1.0, 1);
histogram.Observe(0.11, 2);
histogram.Observe(1.2, 3);
histogram.Observe(2.1, 4);
}
{
auto& value = Value<int64_t>::New("/protobuf_test/value1", "A metric");
value.Set(3);
}
{
auto& gauge = Value<std::string>::New("/protobuf_test/value2", "A metric");
gauge.Set("foo");
}
tensorstore::metrics_proto::MetricCollection metric;
tensorstore::internal_metrics::CollectedMetricToProtoCollection(
GetMetricRegistry().CollectWithPrefix("/protobuf_test"), metric);
tensorstore::internal_metrics::SortProtoCollection(metric);
EXPECT_THAT(metric, Approximately(EqualsProto(R"pb(
metric {
metric_name: "/protobuf_test/counter1"
tag: "counter"
metadata { description: "A metric" }
instance { int_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/counter2"
tag: "counter"
metadata { description: "A metric" }
instance { double_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/counter3"
tag: "counter"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "a"
int_value { value: 1 }
}
instance {
field: "b"
int_value { value: 2 }
}
}
metric {
metric_name: "/protobuf_test/counter4"
tag: "counter"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "1"
double_value { value: 1 }
}
instance {
field: "2"
double_value { value: 2 }
}
}
metric {
metric_name: "/protobuf_test/gauge1"
tag: "gauge"
metadata { description: "A metric" }
instance { int_value { value: 6 max_value: 6 } }
}
metric {
metric_name: "/protobuf_test/gauge2"
tag: "gauge"
metadata { description: "A metric" }
instance { double_value { value: 6 max_value: 6 } }
}
metric {
metric_name: "/protobuf_test/gauge3"
tag: "gauge"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "a"
int_value { value: 3 max_value: 3 }
}
instance {
field: "b"
int_value { value: 3 max_value: 3 }
}
}
metric {
metric_name: "/protobuf_test/gauge4"
tag: "gauge"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "0"
double_value { value: 3 max_value: 3 }
}
instance {
field: "1"
double_value { value: 3 max_value: 3 }
}
}
metric {
metric_name: "/protobuf_test/hist1"
tag: "default_histogram"
metadata { description: "A metric" }
instance {
histogram {
count: 3
mean: 334.33333333333331
sum_of_squared_deviation: 664668.66666666674
bucket: -2
bucket: 1
bucket: 1
bucket: -7
bucket: 1
}
}
}
metric {
metric_name: "/protobuf_test/hist2"
tag: "default_histogram"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "1"
histogram { count: 1 mean: -1 bucket: 1 }
}
instance {
field: "2"
histogram { count: 1 mean: 0.11 bucket: -1 bucket: 1 }
}
instance {
field: "3"
histogram { count: 1 mean: 1.2 bucket: -2 bucket: 1 }
}
instance {
field: "4"
histogram { count: 1 mean: 2.1 bucket: -3 bucket: 1 }
}
}
metric {
metric_name: "/protobuf_test/value1"
tag: "value"
metadata { description: "A metric" }
instance { int_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/value2"
tag: "value"
metadata { description: "A metric" }
instance { string_value { value: "foo" } }
}
)pb")));
}
}
#endif |
665 | cpp | google/tensorstore | registry | tensorstore/serialization/registry.cc | tensorstore/serialization/registry_test.cc | #ifndef TENSORSTORE_SERIALIZATION_REGISTRY_H_
#define TENSORSTORE_SERIALIZATION_REGISTRY_H_
#include <memory>
#include <string_view>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include "absl/base/no_destructor.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
class Registry {
public:
struct Entry {
using Encode = bool (*)(EncodeSink& sink, const void* value);
using Decode = bool (*)(DecodeSource& source, void* value);
const std::type_info& type;
std::string_view id;
Encode encode;
Decode decode;
std::type_index type_index() const { return type; }
};
Registry();
~Registry();
void Add(const Entry& entry);
[[nodiscard]] bool Encode(EncodeSink& sink, const void* value,
const std::type_info& type);
[[nodiscard]] bool Decode(DecodeSource& source, void* value);
private:
internal::HeterogeneousHashSet<const Entry*, std::string_view, &Entry::id>
by_id_;
internal::HeterogeneousHashSet<const Entry*, std::type_index,
&Entry::type_index>
by_type_;
};
template <typename Ptr>
Registry& GetRegistry() {
static absl::NoDestructor<Registry> registry;
return *registry;
}
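// Registers Derived as a serializable implementation reachable through Ptr:
// the entry keys encode/decode thunks by both typeid(Derived) and the static
// Derived::id string, allowing polymorphic round-tripping of Ptr values.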
template <typename Ptr, typename Derived>
void Register() {
using Base =
std::remove_const_t<typename std::pointer_traits<Ptr>::element_type>;
static_assert(std::has_virtual_destructor_v<Base>);
static_assert(std::is_base_of_v<Base, Derived>);
static const Registry::Entry entry{
typeid(Derived),
Derived::id,
+[](EncodeSink& sink, const void* value) -> bool {
return serialization::Encode(
sink, *static_cast<const Derived*>(
static_cast<const Ptr*>(value)->get()));
},
+[](DecodeSource& source, void* value) -> bool {
auto& ptr = *static_cast<Ptr*>(value);
ptr.reset(new Derived);
return serialization::Decode(
source,
*const_cast<Derived*>(static_cast<const Derived*>(ptr.get())));
},
};
GetRegistry<Ptr>().Add(entry);
}
template <typename Ptr>
struct RegistrySerializer {
[[nodiscard]] static bool Encode(EncodeSink& sink, const Ptr& value) {
return GetRegistry<Ptr>().Encode(sink, &value, typeid(*value));
}
[[nodiscard]] static bool Decode(DecodeSource& source, Ptr& value) {
return GetRegistry<Ptr>().Decode(source, &value);
}
};
}
}
#endif
#include "tensorstore/serialization/registry.h"
#include "absl/log/absl_log.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
Registry::Registry() = default;
Registry::~Registry() = default;
void Registry::Add(const Entry& entry) {
if (!by_id_.insert(&entry).second) {
ABSL_LOG(FATAL) << "Duplicate serializable id registration: " << entry.id;
}
if (!by_type_.insert(&entry).second) {
ABSL_LOG(FATAL) << "Duplicate serializable type registration: "
<< entry.type.name();
}
}
bool Registry::Encode(EncodeSink& sink, const void* value,
const std::type_info& type) {
auto it = by_type_.find(std::type_index(type));
if (it == by_type_.end()) {
sink.Fail(absl::InternalError(tensorstore::StrCat(
"Dynamic type not registered for serialization: ", type.name())));
return false;
}
auto& entry = **it;
return serialization::Encode(sink, entry.id) && entry.encode(sink, value);
}
bool Registry::Decode(DecodeSource& source, void* value) {
std::string_view id;
if (!serialization::Decode(source, id)) return false;
auto it = by_id_.find(id);
if (it == by_id_.end()) {
source.Fail(absl::DataLossError(tensorstore::StrCat(
"Dynamic id not registered for serialization: ", id)));
return false;
}
auto& entry = **it;
return entry.decode(source, value);
}
}
} | #include "tensorstore/serialization/registry.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::Register;
using ::tensorstore::serialization::RegistrySerializer;
using ::tensorstore::serialization::SerializationRoundTrip;
struct Base {
virtual ~Base() = default;
};
using BasePtr = std::shared_ptr<const Base>;
struct DerivedA : public Base {
constexpr static const char id[] = "a";
int x;
static constexpr auto ApplyMembers = [](auto&& x, auto f) { return f(x.x); };
};
struct DerivedB : public Base {
constexpr static const char id[] = "b";
std::string y;
static constexpr auto ApplyMembers = [](auto&& x, auto f) { return f(x.y); };
};
static const auto init = [] {
Register<BasePtr, DerivedA>();
Register<BasePtr, DerivedB>();
return nullptr;
}();
TEST(RegistryTest, RoundTripA) {
auto ptr = std::make_shared<DerivedA>();
ptr->x = 42;
EXPECT_THAT(
SerializationRoundTrip(BasePtr(ptr), RegistrySerializer<BasePtr>{}),
::testing::Optional(
::testing::Pointee(::testing::WhenDynamicCastTo<const DerivedA&>(
::testing::Field(&DerivedA::x, 42)))));
}
TEST(RegistryTest, RoundTripB) {
auto ptr = std::make_shared<DerivedB>();
ptr->y = "abc";
EXPECT_THAT(
SerializationRoundTrip(BasePtr(ptr), RegistrySerializer<BasePtr>{}),
::testing::Optional(
::testing::Pointee(::testing::WhenDynamicCastTo<const DerivedB&>(
::testing::Field(&DerivedB::y, "abc")))));
}
} |
666 | cpp | google/tensorstore | prometheus | tensorstore/internal/metrics/prometheus.cc | tensorstore/internal/metrics/prometheus_test.cc | #ifndef TENSORSTORE_INTERNAL_METRICS_PROMETHEUS_H_
#define TENSORSTORE_INTERNAL_METRICS_PROMETHEUS_H_
#include <map>
#include <string>
#include "absl/functional/function_ref.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_metrics {
struct PushGatewayConfig {
std::string host;
std::string job;
std::string instance;
std::map<std::string, std::string> additional_labels;
};
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
const PushGatewayConfig& config);
void PrometheusExpositionFormat(
const CollectedMetric& metric,
absl::FunctionRef<void(std::string)> handle_line);
}
}
#endif
#include "tensorstore/internal/metrics/prometheus.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_metrics {
namespace {
static inline constexpr internal::AsciiSet kDigit{"0123456789"};
static inline constexpr internal::AsciiSet kMetricFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_:"};
static inline constexpr internal::AsciiSet kLabelFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_"};
static inline constexpr internal::AsciiSet kValueUnreserved{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.~()"};
bool IsLegalPrometheusLabel(std::string_view label) {
if (label.empty() || !kLabelFirst.Test(label[0])) return false;
for (char c : label) {
if (!kLabelFirst.Test(c) && !kDigit.Test(c)) return false;
}
return true;
}
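// Appends a "/label/value" path segment for the pushgateway URL. Values that
// contain characters outside kValueUnreserved are written as
// "/label@base64/<websafe-base64>", and empty values as "/label@base64/=".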
absl::Status AppendLabelValue(std::string* url, std::string_view label,
std::string_view value) {
if (!IsLegalPrometheusLabel(label)) {
return absl::InvalidArgumentError("");
}
if (value.empty()) {
    absl::StrAppend(url, "/", label, "@base64/=");
    return absl::OkStatus();
  }
for (char c : value) {
if (!kValueUnreserved.Test(c)) {
absl::StrAppend(url, "/", label, "@base64/",
absl::WebSafeBase64Escape(value));
return absl::OkStatus();
}
}
absl::StrAppend(url, "/", label, "/", value);
return absl::OkStatus();
}
std::string AsPrometheusString(std::string_view in, internal::AsciiSet first) {
while (!in.empty() && !first.Test(in[0])) {
in = in.substr(1);
}
while (!in.empty() && !first.Test(in[in.size() - 1]) &&
!kDigit.Test(in[in.size() - 1])) {
in = in.substr(0, in.size() - 1);
}
std::string raw(in);
for (char& c : raw) {
if (!first.Test(c) && !kDigit.Test(c)) c = '_';
}
return raw;
}
struct PrometheusValueLine {
const std::string& metric_name;
const char* suffix;
const std::string& label_str;
std::string operator()(int64_t x) {
return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
label_str, label_str.empty() ? "" : "} ", x);
}
std::string operator()(double x) {
return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
label_str, label_str.empty() ? "" : "} ", x);
}
std::string operator()(const std::string& x) { return {}; }
std::string operator()(std::monostate) { return {}; }
};
}
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
const PushGatewayConfig& config) {
if (config.job.empty()) {
return absl::InvalidArgumentError("PushGatewayConfig bad job");
}
  if (!absl::StartsWith(config.host, "http://") &&
      !absl::StartsWith(config.host, "https://")) {
return absl::InvalidArgumentError("PushGatewayConfig bad host");
}
std::string url = config.host;
if (!absl::EndsWith(url, "/")) {
absl::StrAppend(&url, "/metrics");
} else {
absl::StrAppend(&url, "metrics");
}
TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, "job", config.job));
if (!config.instance.empty()) {
TENSORSTORE_RETURN_IF_ERROR(
AppendLabelValue(&url, "instance", config.instance));
}
for (const auto& [k, v] : config.additional_labels) {
if (absl::EqualsIgnoreCase("job", k) ||
absl::EqualsIgnoreCase("instance", k)) {
return absl::InvalidArgumentError(
"PushGatewayConfig additional_labels cannot contain job or instance");
}
TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, k, v));
}
return internal_http::HttpRequestBuilder("PUT", std::move(url))
.BuildRequest();
}
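// Writes one exposition-format line per value (plus a "_max" line when a
// maximum is tracked) and, for each histogram, "_mean", "_count", "_variance"
// and "_sum" lines followed by "_bucket" lines labelled le="<bucket index>",
// ending with an le="+Inf" bucket that carries the total count.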
void PrometheusExpositionFormat(
const CollectedMetric& metric,
absl::FunctionRef<void(std::string)> handle_line) {
std::string metric_name =
AsPrometheusString(metric.metric_name, kMetricFirst);
if (metric_name.empty()) return;
std::vector<std::string> prometheus_fields;
prometheus_fields.reserve(metric.field_names.size());
for (size_t i = 0; i < metric.field_names.size(); ++i) {
prometheus_fields.push_back(
AsPrometheusString(metric.field_names[i], kLabelFirst));
}
auto build_label_str = [&](auto& v) -> std::string {
assert(metric.field_names.size() == v.fields.size());
if (v.fields.empty()) return {};
std::string label_str;
for (size_t i = 0; i < metric.field_names.size(); ++i) {
absl::StrAppend(&label_str, i == 0 ? "" : ", ", prometheus_fields[i],
"=\"", absl::CEscape(v.fields[i]), "\"");
}
return label_str;
};
if (!metric.values.empty()) {
std::string line;
for (const auto& v : metric.values) {
std::string label_str = build_label_str(v);
line =
std::visit(PrometheusValueLine{metric_name, " ", label_str}, v.value);
if (!line.empty()) {
handle_line(std::move(line));
}
line = std::visit(PrometheusValueLine{metric_name, "_max ", label_str},
v.max_value);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
if (!metric.histograms.empty()) {
std::string line;
for (const auto& v : metric.histograms) {
std::string label_str = build_label_str(v);
struct Histogram {
std::vector<int64_t> buckets;
};
line = PrometheusValueLine{metric_name, "_mean ", label_str}(v.mean);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_count ", label_str}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_variance ",
label_str}(v.sum_of_squared_deviation);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_sum ",
label_str}(v.mean * v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) --end;
for (size_t i = 0; i < end; i++) {
std::string bucket_labels = absl::StrCat(
label_str, label_str.empty() ? "" : ", ", "le=\"", i, "\"");
line = PrometheusValueLine{metric_name, "_bucket ",
bucket_labels}(v.buckets[i]);
if (!line.empty()) {
handle_line(std::move(line));
}
}
std::string bucket_labels =
absl::StrCat(label_str, label_str.empty() ? "" : ", ", "le=\"+Inf\"");
line =
PrometheusValueLine{metric_name, "_bucket ", bucket_labels}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
}
}
} | #include "tensorstore/internal/metrics/prometheus.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
namespace {
using ::tensorstore::internal_metrics::BuildPrometheusPushRequest;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::PrometheusExpositionFormat;
using ::tensorstore::internal_metrics::PushGatewayConfig;
TEST(PrometheusTest, BuildPrometheusPushRequest) {
auto request = BuildPrometheusPushRequest(
      PushGatewayConfig{"http://localhost:8080", "prometheus_test", "abc", {}});
EXPECT_TRUE(request.has_value());
  EXPECT_EQ("http://localhost:8080/metrics/job/prometheus_test/instance/abc",
            request->url);
}
TEST(PrometheusTest, PrometheusExpositionFormat) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::string> lines;
PrometheusExpositionFormat(
metric, [&](std::string line) { lines.push_back(std::move(line)); });
return lines;
};
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
EXPECT_THAT(format_lines(metric), ::testing::IsEmpty());
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
h.mean = 1;
h.sum_of_squared_deviation = 1;
h.buckets.push_back(0);
h.buckets.push_back(1);
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
v.max_value = int64_t{2};
EXPECT_THAT(format_lines(metric),
::testing::ElementsAre(
"metric_name {field_name=\"vv\"} 1",
"metric_name_max {field_name=\"vv\"} 2",
"metric_name_mean {field_name=\"hh\"} 1",
"metric_name_count {field_name=\"hh\"} 1",
"metric_name_variance {field_name=\"hh\"} 1",
"metric_name_sum {field_name=\"hh\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"0\"} 0",
"metric_name_bucket {field_name=\"hh\", le=\"1\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"+Inf\"} 1"));
}
} |
667 | cpp | google/tensorstore | collect | tensorstore/internal/metrics/collect.cc | tensorstore/internal/metrics/collect_test.cc | #ifndef TENSORSTORE_INTERNAL_METRICS_COLLECT_H_
#define TENSORSTORE_INTERNAL_METRICS_COLLECT_H_
#include <string>
#include <string_view>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/metrics/metadata.h"
namespace tensorstore {
namespace internal_metrics {
struct CollectedMetric {
std::string_view metric_name;
std::vector<std::string_view> field_names;
MetricMetadata metadata;
std::string_view tag;
struct Value {
std::vector<std::string> fields;
std::variant<std::monostate, int64_t, double, std::string> value;
std::variant<std::monostate, int64_t, double> max_value = std::monostate{};
};
std::vector<Value> values;
struct Histogram {
std::vector<std::string> fields;
int64_t count;
double mean;
double sum_of_squared_deviation;
std::vector<int64_t> buckets;
};
std::vector<Histogram> histograms;
};
bool IsCollectedMetricNonZero(const CollectedMetric& metric);
void FormatCollectedMetric(
const CollectedMetric& metric,
absl::FunctionRef<void(bool has_value, std::string formatted_line)>
handle_line);
::nlohmann::json CollectedMetricToJson(const CollectedMetric& metric);
}
}
#endif
#include "tensorstore/internal/metrics/collect.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json.hpp>
namespace tensorstore {
namespace internal_metrics {
namespace {
struct IsNonZero {
bool operator()(int64_t x) { return x != 0; }
bool operator()(double x) { return x != 0; }
bool operator()(const std::string& x) { return !x.empty(); }
bool operator()(std::monostate) { return false; }
};
struct VisitStrAppend {
std::string* line;
const char* before;
const char* after;
void operator()(int64_t x) { absl::StrAppend(line, before, x, after); }
void operator()(double x) { absl::StrAppend(line, before, x, after); }
void operator()(const std::string& x) {
absl::StrAppend(line, before, x, after);
}
void operator()(std::monostate) {}
};
struct VisitJsonDictify {
::nlohmann::json::object_t& dest;
const char* key;
void operator()(int64_t x) { dest[key] = x; }
void operator()(double x) { dest[key] = x; }
void operator()(const std::string& x) { dest[key] = x; }
void operator()(std::monostate) {}
};
}
bool IsCollectedMetricNonZero(const CollectedMetric& metric) {
if (!metric.values.empty()) {
for (const auto& v : metric.values) {
if (std::visit(IsNonZero{}, v.value)) return true;
if (std::visit(IsNonZero{}, v.max_value)) return true;
}
}
if (!metric.histograms.empty()) {
for (const auto& v : metric.histograms) {
if (v.count != 0) return true;
}
}
return false;
}
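// Formats each value as metric_name<field_names>[fields]=value (or
// ={value=..., max=...} when both are present) and each histogram as
// ={count=... mean=... buckets=[...]}, trimming trailing empty buckets;
// handle_line also receives whether the line holds a non-zero value.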
void FormatCollectedMetric(
const CollectedMetric& metric,
absl::FunctionRef<void(bool has_value, std::string formatted_line)>
handle_line) {
std::string field_names;
if (!metric.field_names.empty()) {
field_names = absl::StrJoin(metric.field_names, ", ");
}
auto metric_name_with_fields = [&](auto& v) -> std::string {
if (v.fields.empty()) return std::string(metric.metric_name);
return absl::StrCat(metric.metric_name, "<", field_names, ">[",
absl::StrJoin(v.fields, ", "), "]");
};
if (!metric.values.empty()) {
for (auto& v : metric.values) {
bool has_value = false;
std::string line = metric_name_with_fields(v);
if (std::holds_alternative<std::monostate>(v.max_value) &&
std::holds_alternative<std::monostate>(v.value)) {
} else {
has_value |= std::visit(IsNonZero{}, v.value);
has_value |= std::visit(IsNonZero{}, v.max_value);
if (std::holds_alternative<std::monostate>(v.max_value)) {
std::visit(VisitStrAppend{&line, "=", ""}, v.value);
} else if (std::holds_alternative<std::monostate>(v.value)) {
std::visit(VisitStrAppend{&line, "=", ""}, v.max_value);
} else {
std::visit(VisitStrAppend{&line, "={value=", ""}, v.value);
std::visit(VisitStrAppend{&line, ", max=", "}"}, v.max_value);
}
}
handle_line(has_value, std::move(line));
}
}
if (!metric.histograms.empty()) {
for (auto& v : metric.histograms) {
std::string line = metric_name_with_fields(v);
absl::StrAppend(&line, "={count=", v.count, " mean=", v.mean,
" buckets=[");
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) end--;
auto it = v.buckets.begin();
if (end > 0) {
absl::StrAppend(&line, *it);
}
for (size_t i = 1; i < end;) {
size_t j = std::min(i + 10, end);
absl::StrAppend(&line, ", ");
absl::StrAppend(&line, absl::StrJoin(it + i, it + j, ","));
i = j;
}
absl::StrAppend(&line, "]}");
handle_line(v.count, std::move(line));
}
}
}
::nlohmann::json CollectedMetricToJson(const CollectedMetric& metric) {
::nlohmann::json::object_t result;
result["name"] = metric.metric_name;
auto set_field_keys = [&](auto& v, ::nlohmann::json::object_t& h) {
assert(metric.field_names.size() == v.fields.size());
for (size_t i = 0; i < metric.field_names.size(); ++i) {
if (metric.field_names[i] == "value" ||
metric.field_names[i] == "count" ||
metric.field_names[i] == "max_value" ||
metric.field_names[i] == "sum") {
h[absl::StrCat("_", metric.field_names[i])] = v.fields[i];
} else {
h[std::string(metric.field_names[i])] = v.fields[i];
}
}
};
std::vector<::nlohmann::json> values;
if (!metric.values.empty()) {
for (const auto& v : metric.values) {
::nlohmann::json::object_t tmp{};
set_field_keys(v, tmp);
std::visit(VisitJsonDictify{tmp, "value"}, v.value);
std::visit(VisitJsonDictify{tmp, "max_value"}, v.max_value);
values.push_back(std::move(tmp));
}
}
if (!metric.histograms.empty()) {
for (const auto& v : metric.histograms) {
::nlohmann::json::object_t tmp{};
set_field_keys(v, tmp);
tmp["count"] = v.count;
tmp["mean"] = v.mean;
tmp["sum_of_squared_deviation"] = v.sum_of_squared_deviation;
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) end--;
auto it = v.buckets.begin();
for (size_t i = 0; i < end; ++i) {
tmp[absl::StrCat(i)] = *it++;
}
values.push_back(std::move(tmp));
}
}
result["values"] = std::move(values);
return result;
}
}
} | #include "tensorstore/internal/metrics/collect.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::CollectedMetricToJson;
using ::tensorstore::internal_metrics::FormatCollectedMetric;
using ::tensorstore::internal_metrics::IsCollectedMetricNonZero;
using ::testing::ElementsAre;
using ::testing::Pair;
TEST(CollectTest, IsCollectedMetricNonZero) {
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
EXPECT_FALSE(IsCollectedMetricNonZero(metric));
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 0;
EXPECT_FALSE(IsCollectedMetricNonZero(metric));
h.count = 1;
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
metric.histograms.clear();
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
v.value = std::monostate{};
v.max_value = int64_t{1};
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
v.max_value = std::monostate{};
}
TEST(CollectTest, FormatCollectedMetric) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::pair<bool, std::string>> lines;
FormatCollectedMetric(
metric, [&](bool has_value, std::string formatted_line) {
lines.push_back(std::make_pair(has_value, std::move(formatted_line)));
});
return lines;
};
EXPECT_THAT(format_lines({}), testing::IsEmpty());
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
{
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_THAT(format_lines(metric),
ElementsAre(Pair(true, "metric_name<field_name>[vv]=1")));
}
{
metric.values.clear();
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
EXPECT_THAT(format_lines(metric),
ElementsAre(Pair(true,
"metric_name<field_name>[hh]={count=1 "
"mean=0 buckets=[]}")));
}
}
TEST(CollectTest, CollectedMetricToJson) {
EXPECT_THAT(
CollectedMetricToJson({}),
MatchesJson({{"name", ""}, {"values", nlohmann::json::array_t()}}));
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
{
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_THAT(CollectedMetricToJson(metric),
MatchesJson({{"name", "metric_name"},
{"values",
{{
{"value", 1},
{"field_name", "vv"},
}}}}));
}
{
metric.values.clear();
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
EXPECT_THAT(CollectedMetricToJson(metric),
MatchesJson({{"name", "metric_name"},
{"values",
{{
{"count", 1},
{"field_name", "hh"},
{"mean", 0.0},
{"sum_of_squared_deviation", 0.0},
}}}}));
}
}
} |
668 | cpp | google/tensorstore | storage | tensorstore/internal/poly/storage.cc | tensorstore/internal/poly/storage_test.cc | #ifndef TENSORSTORE_INTERNAL_POLY_STORAGE_H_
#define TENSORSTORE_INTERNAL_POLY_STORAGE_H_
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <typeinfo>
#include <utility>
namespace tensorstore {
namespace internal_poly_storage {
constexpr static inline size_t kAlignment = alignof(std::max_align_t);
template <class T>
static inline constexpr bool CanBeStoredInline =
std::is_nothrow_move_constructible_v<T> && alignof(T) <= kAlignment &&
(kAlignment % alignof(T) == 0);
static inline constexpr size_t ActualInlineSize(size_t InlineSize) {
return InlineSize <= sizeof(void*)
? sizeof(void*)
: ((InlineSize + sizeof(void*) - 1) / sizeof(void*)) *
sizeof(void*);
}
#ifdef _MSC_VER
using TypeId = const char*;
template <typename T>
inline constexpr char type_id_impl = 0;
template <typename T>
inline constexpr TypeId GetTypeId = &type_id_impl<T>;
#else
using TypeId = const std::type_info&;
template <typename T>
inline constexpr TypeId GetTypeId = typeid(T);
#endif
template <typename T>
T& Launder(void* storage) {
return *std::launder(reinterpret_cast<T*>(storage));
}
template <typename T>
const T& Launder(const void* storage) {
return *std::launder(reinterpret_cast<const T*>(storage));
}
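// Two storage policies share the same opaque buffer: InlineStorageOps
// constructs the object directly in the buffer (chosen when it fits and is
// nothrow move constructible with compatible alignment), while HeapStorageOps
// stores a pointer to a heap-allocated object in the buffer instead.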
template <typename Self>
struct InlineStorageOps {
static_assert(
std::is_same_v<Self, std::remove_cv_t<std::remove_reference_t<Self>>>);
using Type = Self;
static constexpr bool UsesInlineStorage() { return true; }
static Self& Get(void* storage) { return Launder<Self>(storage); }
template <typename... Arg>
static void Construct(void* storage, Arg&&... arg) {
new (storage) Self(std::forward<Arg>(arg)...);
}
static void Destroy(void* storage) { Launder<Self>(storage).~Self(); }
static void Copy(void* dest, const void* source) {
new (dest) Self(Launder<Self>(source));
}
static void Relocate(void* dest, void* source) {
Self& s = Launder<Self>(source);
new (dest) Self(std::move(s));
s.~Self();
}
};
template <typename Self>
struct HeapStorageOps {
static_assert(
std::is_same_v<Self, std::remove_cv_t<std::remove_reference_t<Self>>>);
using Type = Self;
static constexpr bool UsesInlineStorage() { return false; }
static Self& Get(void* storage) { return *Launder<Self*>(storage); }
template <typename... Arg>
static void Construct(void* storage, Arg&&... arg) {
Launder<Self*>(storage) = new Self(std::forward<Arg>(arg)...);
}
static void Destroy(void* storage) { delete Launder<Self*>(storage); }
static void Copy(void* dest, const void* source) {
Launder<Self*>(dest) = new Self(*Launder<Self*>(source));
}
static void Relocate(void* dest, void* source) {
Self*& s = Launder<Self*>(source);
Launder<Self*>(dest) = s;
s = nullptr;
}
};
struct VTableBase {
using Destroy = void (*)(void* obj);
using Relocate = void (*)(void* dest, void* source);
using Copy = void (*)(void* dest, const void* source);
std::add_const_t<TypeId> type;
Destroy destroy;
Relocate relocate;
Copy copy;
};
struct NullVTable {
static void Destroy(void*) {}
static void Relocate(void*, void*) {}
static void Copy(void*, const void*) {}
constexpr static VTableBase vtable = {
GetTypeId<void>,
&Destroy,
&Relocate,
&Copy,
};
};
template <typename Ops>
constexpr VTableBase::Copy GetCopyImpl(std::true_type copyable) {
return &Ops::Copy;
}
template <typename Ops>
constexpr VTableBase::Copy GetCopyImpl(std::false_type copyable) {
return nullptr;
}
template <typename Ops, bool Copyable>
constexpr VTableBase GetVTableBase() {
return {
GetTypeId<typename Ops::Type>,
&Ops::Destroy,
&Ops::Relocate,
GetCopyImpl<Ops>(std::integral_constant<bool, Copyable>{}),
};
}
template <size_t InlineSize, bool Copyable>
class StorageImpl {
friend class StorageImpl<InlineSize, true>;
static_assert(InlineSize == ActualInlineSize(InlineSize));
public:
template <typename T>
using Ops =
std::conditional_t<(sizeof(T) <= InlineSize && CanBeStoredInline<T>),
InlineStorageOps<T>, HeapStorageOps<T>>;
using VTable = VTableBase;
StorageImpl() = default;
StorageImpl(StorageImpl&& other) noexcept { Construct(std::move(other)); }
StorageImpl& operator=(StorageImpl&& other) noexcept {
vtable_->destroy(&storage_);
Construct(std::move(other));
return *this;
}
~StorageImpl() { vtable_->destroy(storage()); }
bool null() const { return vtable_->type == GetTypeId<void>; }
void* storage() const { return const_cast<char*>(&storage_[0]); }
const VTable* vtable() const { return vtable_; }
template <typename T>
T* get_if() {
return (GetTypeId<T> != vtable_->type) ? nullptr : &Ops<T>::Get(storage());
}
template <typename T>
const T* get_if() const {
return (GetTypeId<T> != vtable_->type) ? nullptr : &Ops<T>::Get(storage());
}
template <typename T, typename... U>
void ConstructT(const VTable* vtable, U&&... arg) {
vtable_ = vtable;
Ops<T>::Construct(storage(), std::forward<U>(arg)...);
}
void Construct(StorageImpl&& other) {
vtable_ = std::exchange(other.vtable_, &NullVTable::vtable);
vtable_->relocate(storage(), other.storage());
}
void Destroy() {
std::exchange(vtable_, &NullVTable::vtable)->destroy(storage());
}
private:
alignas(kAlignment) char storage_[InlineSize];
const VTable* vtable_ = &NullVTable::vtable;
};
template <size_t InlineSize>
class StorageImpl<InlineSize, true> : public StorageImpl<InlineSize, false> {
using Base = StorageImpl<InlineSize, false>;
public:
using Base::Base;
StorageImpl(const StorageImpl& other) { this->CopyConstruct(other); }
StorageImpl(StorageImpl&&) = default;
StorageImpl& operator=(StorageImpl&& other) = default;
StorageImpl& operator=(const StorageImpl& other) {
this->Destroy();
this->CopyConstruct(other);
return *this;
}
void CopyConstruct(const StorageImpl& other) {
this->vtable_ = other.vtable_;
this->vtable_->copy(this->storage(), other.storage());
}
};
template <size_t TargetInlineSize, bool Copyable>
using Storage = StorageImpl<ActualInlineSize(TargetInlineSize), Copyable>;
}
}
#endif
#include "tensorstore/internal/poly/storage.h"
namespace tensorstore {
namespace internal_poly_storage {
constexpr VTableBase NullVTable::vtable;
}
} | #include "tensorstore/internal/poly/storage.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_poly_storage::ActualInlineSize;
using ::tensorstore::internal_poly_storage::GetVTableBase;
using ::tensorstore::internal_poly_storage::HeapStorageOps;
using ::tensorstore::internal_poly_storage::InlineStorageOps;
using ::tensorstore::internal_poly_storage::Storage;
using ::tensorstore::internal_poly_storage::VTableBase;
static constexpr size_t kStorageSize = ActualInlineSize(8);
static_assert(80 == ActualInlineSize(79));
static_assert(80 == ActualInlineSize(80));
TEST(ObjectOps, InlineTrivial) {
using S = Storage<kStorageSize, true>;
using Ops = typename S::Ops<int>;
static_assert(std::is_same_v<Ops, InlineStorageOps<int>>);
static_assert(Ops::UsesInlineStorage());
S a, b;
EXPECT_EQ(nullptr, a.template get_if<int>());
Ops::Construct(a.storage(), 7);
Ops::Relocate(b.storage(), a.storage());
Ops::Copy(a.storage(), b.storage());
EXPECT_EQ(7, Ops::Get(a.storage()));
EXPECT_EQ(7, Ops::Get(b.storage()));
Ops::Destroy(a.storage());
Ops::Destroy(b.storage());
}
TEST(ObjectOps, NotInlineTrivial) {
struct X {
double x;
double y;
double z;
};
using S = Storage<kStorageSize, true>;
using Ops = typename S::Ops<X>;
static_assert(std::is_same_v<Ops, HeapStorageOps<X>>);
static_assert(!Ops::UsesInlineStorage());
S a, b;
EXPECT_EQ(nullptr, a.get_if<int>());
Ops::Construct(a.storage(), X{7, 8, 9});
Ops::Relocate(b.storage(), a.storage());
Ops::Copy(a.storage(), b.storage());
EXPECT_EQ(7, Ops::Get(a.storage()).x);
EXPECT_EQ(9, Ops::Get(b.storage()).z);
Ops::Destroy(a.storage());
Ops::Destroy(b.storage());
}
template <typename Ops, bool Copyable>
static const VTableBase* GetVTable() {
static VTableBase vtable = GetVTableBase<Ops, Copyable>();
return &vtable;
}
TEST(Storage, MoveOnly) {
using S = Storage<16, false>;
using Ops = typename S::Ops<int>;
{
S a;
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, false>(), 7);
ASSERT_FALSE(a.null());
ASSERT_NE(nullptr, a.get_if<int>());
EXPECT_EQ(7, *a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, false>(), 8);
S b = std::move(a);
ASSERT_FALSE(b.null());
ASSERT_NE(nullptr, b.get_if<int>());
EXPECT_EQ(8, *b.get_if<int>());
S c(std::move(b));
ASSERT_FALSE(c.null());
ASSERT_NE(nullptr, c.get_if<int>());
EXPECT_EQ(8, *c.get_if<int>());
}
}
TEST(Storage, Copy) {
using S = Storage<16, true>;
using Ops = typename S::Ops<int>;
{
S a;
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, true>(), 7);
ASSERT_FALSE(a.null());
ASSERT_NE(nullptr, a.get_if<int>());
EXPECT_EQ(7, *a.get_if<int>());
}
{
S a;
a.ConstructT<int>(GetVTable<Ops, true>(), 8);
S b = a;
ASSERT_NE(nullptr, b.get_if<int>());
EXPECT_EQ(8, *b.get_if<int>());
S c(a);
EXPECT_FALSE(a.null());
ASSERT_FALSE(c.null());
ASSERT_NE(nullptr, c.get_if<int>());
EXPECT_EQ(8, *c.get_if<int>());
a.Destroy();
EXPECT_TRUE(a.null());
EXPECT_EQ(nullptr, a.get_if<int>());
}
}
} |
669 | cpp | google/tensorstore | concurrent | tensorstore/internal/testing/concurrent.cc | tensorstore/internal/testing/concurrent_test.cc | #ifndef TENSORSTORE_INTERNAL_TESTING_CONCURRENT_H_
#define TENSORSTORE_INTERNAL_TESTING_CONCURRENT_H_
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <ctime>
#include <thread>
#include <type_traits>
#include <utility>
#include "tensorstore/internal/multi_barrier.h"
#include "tensorstore/internal/thread/thread.h"
namespace tensorstore {
namespace internal_testing {
#ifdef _WIN32
class TestConcurrentLock {
public:
TestConcurrentLock();
~TestConcurrentLock();
TestConcurrentLock(TestConcurrentLock&& other) noexcept = delete;
TestConcurrentLock& operator=(TestConcurrentLock&& other) = delete;
TestConcurrentLock(const TestConcurrentLock& other) = delete;
TestConcurrentLock& operator=(const TestConcurrentLock& other) = delete;
private:
void* handle_;
};
#endif
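// Repeatedly runs `initialize`, then every `concurrent_ops` functor on its own
// thread (a MultiBarrier plus an atomic counter encourage the operations to
// overlap), then `finalize`, for `num_iterations` rounds. On Windows a named
// mutex serializes concurrently executing TestConcurrent instances.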
template <typename Initialize, typename Finalize, typename... ConcurrentOps>
void TestConcurrent(size_t num_iterations, Initialize initialize,
Finalize finalize, ConcurrentOps... concurrent_ops) {
#ifdef _WIN32
TestConcurrentLock lock;
#endif
std::atomic<size_t> counter(0);
constexpr size_t concurrent_op_size = sizeof...(ConcurrentOps);
internal::MultiBarrier sync_point(1 + concurrent_op_size);
size_t sync_mask = std::min(4u, std::thread::hardware_concurrency()) - 1;
if (sync_mask == 2) sync_mask--;
internal::Thread threads[]{internal::Thread({"concurrent"}, [&] {
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
sync_point.Block();
size_t current = counter.fetch_add(1, std::memory_order_acq_rel) + 1;
size_t target = std::min(current | sync_mask, concurrent_op_size);
while (counter.load() < target) {
std::this_thread::yield();
}
concurrent_ops();
sync_point.Block();
}
})...};
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
initialize();
counter = 0;
sync_point.Block();
sync_point.Block();
finalize();
}
for (auto& t : threads) {
t.Join();
}
}
template <typename Initialize, typename Finalize, typename ConcurrentOp,
size_t... Is>
void TestConcurrent(std::index_sequence<Is...>, size_t num_iterations,
Initialize initialize, Finalize finalize,
ConcurrentOp concurrent_op) {
TestConcurrent(
num_iterations, std::move(initialize), std::move(finalize),
[&] { concurrent_op(std::integral_constant<size_t, Is>{}); }...);
}
template <size_t NumConcurrentOps, typename Initialize, typename Finalize,
typename ConcurrentOp>
void TestConcurrent(size_t num_iterations, Initialize initialize,
Finalize finalize, ConcurrentOp concurrent_op) {
return TestConcurrent(std::make_index_sequence<NumConcurrentOps>{},
num_iterations, std::move(initialize),
std::move(finalize), std::move(concurrent_op));
}
}
}
#endif
#include "tensorstore/internal/testing/concurrent.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
namespace tensorstore {
namespace internal_testing {
#ifdef _WIN32
TestConcurrentLock::TestConcurrentLock() {
handle_ = ::CreateMutexA(nullptr,
FALSE,
"TensorStoreTestConcurrentMutex");
ABSL_CHECK(handle_ != nullptr);
  if (::WaitForSingleObject(handle_, 0) != WAIT_OBJECT_0) {
ABSL_LOG(INFO) << "Waiting on WIN32 Concurrent Lock";
ABSL_CHECK(::WaitForSingleObject(handle_, INFINITE) == WAIT_OBJECT_0);
}
}
TestConcurrentLock::~TestConcurrentLock() {
ABSL_CHECK(::ReleaseMutex(handle_));
::CloseHandle(handle_);
}
#endif
}
} | #include "tensorstore/internal/testing/concurrent.h"
#include <atomic>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
namespace {
using ::tensorstore::internal_testing::TestConcurrent;
TEST(TestConcurrent, EnsureContentionHappens) {
static constexpr int kIterations = 100;
static constexpr int kN = 20;
absl::Mutex lock;
int uncontended{0};
TestConcurrent<kN>(
kIterations,
[&] {},
[&] {},
[&](auto) {
if (lock.TryLock()) {
uncontended++;
lock.Unlock();
}
});
int contended = (kIterations * kN) - uncontended;
ABSL_LOG(INFO) << "Contended in " << contended << " of 2000 iterations.";
}
TEST(TestConcurrent, Example1) {
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent(
kIterations,
[&] {},
[&] {},
[&]() { sum += 1; }, [&]() { sum += 2; }, [&]() { sum += 3; });
EXPECT_EQ(100 + 200 + 300, sum);
}
template <typename T>
struct TestConcurrentFixture : public ::testing::Test {};
using ConcurrentOpSizes = ::testing::Types<std::integral_constant<int, 1>,
std::integral_constant<int, 4>,
std::integral_constant<int, 16>>;
TYPED_TEST_SUITE(TestConcurrentFixture, ConcurrentOpSizes);
TYPED_TEST(TestConcurrentFixture, Example2) {
static constexpr int kN = TypeParam{}();
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent<kN>(
kIterations,
[&] {},
[&] {}, [&](auto i) { sum += (i + 1); });
EXPECT_EQ((kIterations / 2) * kN * (kN + 1), sum);
}
} |
670 | cpp | google/tensorstore | zip_details | tensorstore/internal/compression/zip_details.cc | tensorstore/internal/compression/zip_details_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_ZIP_DETAILS_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_ZIP_DETAILS_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <variant>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "riegeli/bytes/reader.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_zip {
constexpr size_t kEOCDBlockSize = 65536 + 48;
constexpr const unsigned char kLocalHeaderLiteral[4] = {'P', 'K', 0x03, 0x04};
constexpr const unsigned char kCentralHeaderLiteral[4] = {'P', 'K', 0x01, 0x02};
constexpr const unsigned char kEOCDLiteral[4] = {'P', 'K', 0x05, 0x06};
constexpr const unsigned char kEOCD64LocatorLiteral[4] = {'P', 'K', 0x06, 0x07};
constexpr const unsigned char kEOCD64Literal[4] = {'P', 'K', 0x06, 0x06};
constexpr const unsigned char kDataDescriptorLiteral[4] = {'P', 'K', 0x07,
0x08};
constexpr const uint16_t kHasDataDescriptor = 0x08;
enum class ZipCompression : uint16_t {
kStore = 0,
kDeflate = 8,
kBzip2 = 12,
kLZMA = 14,
kZStd = 93,
kXZ = 95,
kAes = 99,
};
struct ZipEOCD64Locator {
uint32_t disk_number_with_cd;
int64_t cd_offset;
static constexpr int64_t kRecordSize = 20;
};
absl::Status ReadEOCD64Locator(riegeli::Reader &reader,
ZipEOCD64Locator &locator);
struct ZipEOCD {
uint64_t num_entries;
int64_t cd_size;
int64_t cd_offset;
uint64_t record_offset;
std::string comment;
static constexpr int64_t kEOCDRecordSize = 22;
static constexpr int64_t kEOCD64RecordSize = 48;
template <typename Sink>
friend void AbslStringify(Sink &sink, const ZipEOCD &entry) {
absl::Format(&sink,
"EOCD{num_entries=%d, cd_size=%d, cd_offset=%d, "
"record_offset=%d, comment=\"%s\"}",
entry.num_entries, entry.cd_size, entry.cd_offset,
entry.record_offset, entry.comment);
}
};
absl::Status ReadEOCD(riegeli::Reader &reader, ZipEOCD &eocd);
absl::Status ReadEOCD64(riegeli::Reader &reader, ZipEOCD &eocd);
std::variant<absl::Status, int64_t> TryReadFullEOCD(riegeli::Reader &reader,
ZipEOCD &eocd,
int64_t offset_adjustment);
struct ZipEntry {
uint16_t version_madeby;
uint16_t flags;
ZipCompression compression_method;
uint32_t crc;
uint64_t compressed_size;
uint64_t uncompressed_size;
uint16_t internal_fa;
uint32_t external_fa;
uint64_t local_header_offset;
uint64_t estimated_read_size;
uint64_t end_of_header_offset;
absl::Time mtime;
absl::Time atime;
std::string filename;
std::string comment;
bool is_zip64 = false;
static constexpr int64_t kCentralRecordSize = 46;
static constexpr int64_t kLocalRecordSize = 30;
template <typename Sink>
friend void AbslStringify(Sink &sink, const ZipEntry &entry) {
absl::Format(&sink,
"ZipEntry{\n"
" version_madeby=%v\n"
" flags=%x\n"
" compression_method=%v\n"
" crc=%08x\n"
" compressed_size=%d\n"
" uncompressed_size=%d\n"
" internal_fa=%x\n"
" external_fa=%x\n"
" local_header_offset=%v\n"
" estimated_read_size=%v\n"
" mtime=%s\n"
" atime=%s\n"
" filename=\"%s\"\n"
" comment=\"%s\"\n"
"}",
entry.version_madeby, entry.flags, entry.compression_method,
entry.crc, entry.compressed_size, entry.uncompressed_size,
entry.internal_fa, entry.external_fa,
entry.local_header_offset, entry.estimated_read_size,
absl::FormatTime(entry.mtime), absl::FormatTime(entry.atime),
entry.filename, entry.comment);
}
};
absl::Status ReadCentralDirectoryEntry(riegeli::Reader &reader,
ZipEntry &entry);
absl::Status ReadLocalEntry(riegeli::Reader &reader, ZipEntry &entry);
absl::Status ValidateEntryIsSupported(const ZipEntry &entry);
tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetRawReader(
riegeli::Reader *reader, ZipEntry &entry);
tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetReader(
riegeli::Reader *reader, ZipEntry &entry);
}
}
#endif
#include "tensorstore/internal/compression/zip_details.h"
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <ctime>
#include <ios>
#include <limits>
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "riegeli/bytes/limiting_reader.h"
#include "riegeli/bytes/prefix_limiting_reader.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bzip2/bzip2_reader.h"
#include "riegeli/endian/endian_reading.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zstd/zstd_reader.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/riegeli/find.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zip {
namespace {
using ::riegeli::ReadLittleEndian16;
using ::riegeli::ReadLittleEndian32;
using ::riegeli::ReadLittleEndian64;
using ::riegeli::ReadLittleEndianSigned64;
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip_details");
const absl::Time kWindowsEpoch =
::absl::UnixEpoch() - ::absl::Seconds(11644473600);
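// MS-DOS timestamps pack the date as day (bits 0-4), month (bits 5-8) and
// years since 1980 (bits 9-15), and the time as two-second units (bits 0-4),
// minutes (bits 5-10) and hours (bits 11-15); the conversion below fills a
// struct tm interpreted in UTC.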
absl::Time MakeMSDOSTime(uint16_t date, uint16_t time) {
struct tm dos_tm;
dos_tm.tm_mday = (uint16_t)(date & 0x1f);
dos_tm.tm_mon = (uint16_t)((date >> 5) & 0xf) - 1;
dos_tm.tm_year = (uint16_t)(date >> 9) + 80;
dos_tm.tm_hour = (uint16_t)(time >> 11);
dos_tm.tm_min = (uint16_t)((time >> 5) & 0x1f);
dos_tm.tm_sec = (uint16_t)(2 * (time & 0x1f));
dos_tm.tm_isdst = -1;
return absl::FromTM(dos_tm, absl::UTCTimeZone());
}
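// ZIP64 extended information extra field (tag 0x0001): each 64-bit value is
// present only when the corresponding 32-bit record field is saturated at
// 0xFFFFFFFF, so the sizes and offset are read conditionally in that order.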
absl::Status ReadExtraField_Zip64_0001(riegeli::Reader &reader,
uint16_t tag_size, ZipEntry &entry) {
assert(tag_size >= 8);
entry.is_zip64 = true;
do {
if (tag_size >= 8 &&
entry.uncompressed_size == std::numeric_limits<uint32_t>::max()) {
if (!ReadLittleEndian64(reader, entry.uncompressed_size)) break;
tag_size -= 8;
}
if (tag_size >= 8 &&
entry.compressed_size == std::numeric_limits<uint32_t>::max()) {
if (!ReadLittleEndian64(reader, entry.compressed_size)) break;
tag_size -= 8;
}
if (tag_size >= 8 &&
entry.local_header_offset == std::numeric_limits<uint32_t>::max()) {
if (!ReadLittleEndian64(reader, entry.local_header_offset)) break;
tag_size -= 8;
}
return absl::OkStatus();
} while (false);
return absl::InvalidArgumentError("Failed to read ZIP64 extra field");
}
absl::Status ReadExtraField_Unix_000D(riegeli::Reader &reader,
uint16_t tag_size, ZipEntry &entry) {
assert(tag_size >= 12);
uint32_t ignored32;
uint32_t mtime;
uint32_t atime;
if (!ReadLittleEndian32(reader, atime) ||
!ReadLittleEndian32(reader, mtime) ||
      !ReadLittleEndian32(reader, ignored32)) {
return absl::InvalidArgumentError("Failed to read UNIX extra field");
}
entry.atime = absl::FromUnixSeconds(atime);
entry.mtime = absl::FromUnixSeconds(mtime);
return absl::OkStatus();
}
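// NTFS extra field (tag 0x000A): attribute 0x0001 stores mtime/atime/ctime as
// 100-nanosecond intervals since the Windows epoch (1601-01-01), converted
// here via kWindowsEpoch; other attributes are skipped.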
absl::Status ReadExtraField_NTFS_000A(riegeli::Reader &reader,
uint16_t tag_size, ZipEntry &entry) {
assert(tag_size >= 8);
uint32_t ignored32;
if (!ReadLittleEndian32(reader, ignored32)) {
return absl::InvalidArgumentError("Failed to read NTFS extra field");
}
tag_size -= 4;
uint16_t ntfs_tag, ntfs_size;
while (tag_size > 4) {
if (!ReadLittleEndian16(reader, ntfs_tag) ||
!ReadLittleEndian16(reader, ntfs_size)) {
break;
}
tag_size -= 4;
tag_size -= ntfs_size;
if (ntfs_tag == 0x0001 && ntfs_size == 24) {
uint64_t mtime;
uint64_t atime;
uint64_t ctime;
if (!ReadLittleEndian64(reader, mtime) ||
!ReadLittleEndian64(reader, atime) ||
!ReadLittleEndian64(reader, ctime)) {
return absl::InvalidArgumentError("Failed to read NTFS extra field");
}
entry.mtime = kWindowsEpoch + absl::Nanoseconds(mtime * 100);
entry.atime = kWindowsEpoch + absl::Nanoseconds(atime * 100);
} else {
reader.Skip(ntfs_size);
}
}
return absl::OkStatus();
}
absl::Status ReadExtraField_Unix_5455(riegeli::Reader &reader,
uint16_t tag_size, ZipEntry &entry) {
assert(tag_size >= 1);
uint8_t flags = 0;
uint32_t tstamp = 0;
do {
if (!reader.ReadByte(flags)) break;
--tag_size;
if (flags & 0x01 && tag_size >= 4) {
if (!ReadLittleEndian32(reader, tstamp)) break;
tag_size -= 4;
entry.mtime = absl::FromUnixSeconds(tstamp);
}
if (flags & 0x02 && tag_size >= 4) {
if (!ReadLittleEndian32(reader, tstamp)) break;
tag_size -= 4;
entry.atime = absl::FromUnixSeconds(tstamp);
}
if (flags & 0x04 && tag_size >= 4) {
if (!ReadLittleEndian32(reader, tstamp)) break;
tag_size -= 4;
}
return absl::OkStatus();
} while (false);
return absl::InvalidArgumentError(
"Failed to read unix timestamp extra field");
}
absl::Status ReadExtraField(riegeli::Reader &reader, ZipEntry &entry) {
uint16_t tag, tag_size;
absl::Status status;
while (reader.ok()) {
if (!ReadLittleEndian16(reader, tag) ||
!ReadLittleEndian16(reader, tag_size)) {
return absl::OkStatus();
}
ABSL_LOG_IF(INFO, zip_logging)
<< std::hex << "extra tag " << tag << " size " << tag_size;
auto pos = reader.pos();
switch (tag) {
case 0x0001:
status.Update(ReadExtraField_Zip64_0001(reader, tag_size, entry));
break;
case 0x000d:
status.Update(ReadExtraField_Unix_000D(reader, tag_size, entry));
break;
case 0x000a:
status.Update(ReadExtraField_NTFS_000A(reader, tag_size, entry));
break;
case 0x5455:
status.Update(ReadExtraField_Unix_5455(reader, tag_size, entry));
break;
case 0x7875:
break;
default:
break;
}
assert(reader.pos() <= pos + tag_size);
reader.Seek(pos + tag_size);
}
return status;
}
}
absl::Status ReadEOCD64Locator(riegeli::Reader &reader,
ZipEOCD64Locator &locator) {
if (!reader.Pull(ZipEOCD64Locator::kRecordSize)) {
return absl::InvalidArgumentError(
"ZIP EOCD64 Locator Entry insufficient data available");
}
uint32_t signature;
ReadLittleEndian32(reader, signature);
if (signature != 0x07064b50) {
return absl::InvalidArgumentError(absl::StrFormat(
"Failed to read ZIP64 End of Central Directory Locator signature %08x",
signature));
}
uint32_t ignored32;
ReadLittleEndian32(reader, locator.disk_number_with_cd);
ReadLittleEndianSigned64(reader, locator.cd_offset);
ReadLittleEndian32(reader, ignored32);
if (locator.cd_offset < 0) {
ABSL_LOG_IF(INFO, zip_logging && !reader.ok()) << reader.status();
return absl::InvalidArgumentError(
"Failed to read ZIP64 End of Central Directory Locator");
}
return absl::OkStatus();
}
absl::Status ReadEOCD64(riegeli::Reader &reader, ZipEOCD &eocd) {
if (!reader.Pull(ZipEOCD::kEOCD64RecordSize)) {
return absl::InvalidArgumentError(
"ZIP EOCD Entry insufficient data available");
}
auto eocd_pos = reader.pos();
uint32_t signature;
ReadLittleEndian32(reader, signature);
if (signature != 0x06064b50) {
return absl::InvalidArgumentError(
"Failed to read ZIP64 Central Directory Entry signature");
}
uint64_t eocd_size;
ReadLittleEndian64(reader, eocd_size);
if (eocd_size < 44 || !reader.Pull(eocd_size)) {
return absl::InvalidArgumentError(
"Failed to read ZIP64 End of Central Directory");
}
riegeli::LimitingReader oecd64_reader(
&reader,
riegeli::LimitingReaderBase::Options().set_exact_length(eocd_size));
uint16_t version_madeby;
uint16_t version_needed_to_extract;
uint32_t disk_number;
uint32_t disk_number_with_cd;
uint64_t total_num_entries;
ReadLittleEndian16(oecd64_reader, version_madeby);
ReadLittleEndian16(oecd64_reader, version_needed_to_extract);
ReadLittleEndian32(oecd64_reader, disk_number);
ReadLittleEndian32(oecd64_reader, disk_number_with_cd);
ReadLittleEndian64(oecd64_reader, eocd.num_entries);
ReadLittleEndian64(oecd64_reader, total_num_entries);
ReadLittleEndianSigned64(oecd64_reader, eocd.cd_size);
ReadLittleEndianSigned64(oecd64_reader, eocd.cd_offset);
if (disk_number != disk_number_with_cd ||
eocd.num_entries != total_num_entries ||
eocd.num_entries == std::numeric_limits<uint16_t>::max() ||
eocd.cd_size == std::numeric_limits<uint16_t>::max() ||
eocd.cd_offset == std::numeric_limits<uint32_t>::max() ||
eocd.cd_size < 0 || eocd.cd_offset < 0) {
return absl::InvalidArgumentError(
"Failed to read ZIP64 End of Central Directory");
}
oecd64_reader.Seek(eocd_size);
eocd.record_offset = eocd_pos;
return absl::OkStatus();
}
absl::Status ReadEOCD(riegeli::Reader &reader, ZipEOCD &eocd) {
if (!reader.Pull(ZipEOCD::kEOCDRecordSize)) {
return absl::InvalidArgumentError(
"ZIP EOCD Entry insufficient data available");
}
auto eocd_pos = reader.pos();
uint32_t signature;
ReadLittleEndian32(reader, signature);
if (signature != 0x06054b50) {
return absl::InvalidArgumentError(
"Failed to read ZIP Central Directory Entry signature");
}
uint16_t disk_number;
uint16_t disk_number_with_cd;
uint16_t num_entries;
uint16_t total_num_entries;
uint32_t cd_size;
uint32_t cd_offset;
uint16_t comment_length;
ReadLittleEndian16(reader, disk_number);
ReadLittleEndian16(reader, disk_number_with_cd);
ReadLittleEndian16(reader, num_entries);
ReadLittleEndian16(reader, total_num_entries);
ReadLittleEndian32(reader, cd_size);
ReadLittleEndian32(reader, cd_offset);
ReadLittleEndian16(reader, comment_length);
if (num_entries != total_num_entries) {
ABSL_LOG(INFO) << "ZIP num_entries mismatch " << num_entries << " vs "
<< total_num_entries;
return absl::InvalidArgumentError(
"Failed to read ZIP End of Central Directory");
}
if (disk_number != disk_number_with_cd) {
ABSL_LOG(INFO) << "ZIP disk_number mismatch " << disk_number << " vs "
<< disk_number_with_cd;
return absl::InvalidArgumentError(
"Failed to read ZIP End of Central Directory");
}
if (comment_length > 0 && !reader.Read(comment_length, eocd.comment)) {
return absl::InvalidArgumentError(
"Failed to read ZIP End of Central Directory");
}
reader.VerifyEnd();
if (!reader.status().ok()) {
return absl::InvalidArgumentError(
"Failed to read ZIP End of Central Directory");
}
eocd.record_offset = eocd_pos;
eocd.num_entries = num_entries;
eocd.cd_size = cd_size;
eocd.cd_offset = cd_offset;
if (total_num_entries == std::numeric_limits<uint16_t>::max() ||
cd_offset == std::numeric_limits<uint32_t>::max()) {
eocd.cd_offset = std::numeric_limits<uint32_t>::max();
}
return absl::OkStatus();
}
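// Finds the End Of Central Directory by scanning backwards for its signature.
// If the 32-bit cd_offset is saturated, the ZIP64 EOCD locator immediately
// preceding the EOCD record is consulted; when the ZIP64 record is not
// reachable through this reader, the locator's cd_offset is returned as an
// int64_t so the caller can issue a further read.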
std::variant<absl::Status, int64_t> TryReadFullEOCD(riegeli::Reader &reader,
ZipEOCD &eocd,
int64_t offset_adjustment) {
if (!internal::FindLast(
reader, std::string_view(reinterpret_cast<const char *>(kEOCDLiteral),
sizeof(kEOCDLiteral)))) {
return absl::InvalidArgumentError("Failed to find valid ZIP EOCD");
}
int64_t eocd_start = reader.pos();
ZipEOCD last_eocd{};
TENSORSTORE_RETURN_IF_ERROR(ReadEOCD(reader, last_eocd));
if (last_eocd.cd_offset != std::numeric_limits<uint32_t>::max()) {
eocd = last_eocd;
reader.Seek(eocd_start + 4);
return absl::OkStatus();
}
if (eocd_start < ZipEOCD64Locator::kRecordSize) {
return absl::InvalidArgumentError("Block does not contain EOCD64 Locator");
}
if (!reader.Seek(eocd_start - ZipEOCD64Locator::kRecordSize)) {
if (!reader.ok() && !reader.status().ok()) {
return MaybeAnnotateStatus(reader.status(),
"Failed to read EOCD64 Locator");
}
return absl::InvalidArgumentError("Failed to read EOCD64 Locator");
}
ZipEOCD64Locator locator;
TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64Locator(reader, locator));
if (offset_adjustment < 0) {
return locator.cd_offset;
}
auto target_pos = locator.cd_offset - offset_adjustment;
if (target_pos < 0) {
assert(offset_adjustment > 0);
return locator.cd_offset;
}
if (!reader.Seek(target_pos)) {
if (!reader.ok() && !reader.status().ok()) {
return MaybeAnnotateStatus(reader.status(), "Failed to read EOCD64");
}
return absl::InvalidArgumentError("Failed to read EOCD64");
}
TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64(reader, last_eocd));
eocd = last_eocd;
reader.Seek(eocd_start + 4);
return absl::OkStatus();
}
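// Parses one ZIP central directory header (signature 0x02014b50) into `entry`,
// including the filename, extra fields, and comment, and computes an estimated
// number of bytes needed to read the corresponding local entry and its data.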
absl::Status ReadCentralDirectoryEntry(riegeli::Reader &reader,
ZipEntry &entry) {
if (!reader.Pull(ZipEntry::kCentralRecordSize)) {
return absl::InvalidArgumentError(
"ZIP Central Directory Entry insufficient data available");
}
uint32_t signature;
ReadLittleEndian32(reader, signature);
if (signature != 0x02014b50) {
return absl::InvalidArgumentError(
"Failed to read ZIP Central Directory Entry signature");
}
uint32_t uncompressed_size = 0;
uint32_t compressed_size;
uint32_t relative_header_offset = 0;
uint16_t file_name_length = 0;
uint16_t extra_field_length = 0;
uint16_t file_comment_length = 0;
uint16_t last_mod_time;
uint16_t last_mod_date;
uint16_t ignored16;
uint16_t compression_method;
ReadLittleEndian16(reader, entry.version_madeby);
ReadLittleEndian16(reader, ignored16);
ReadLittleEndian16(reader, entry.flags);
ReadLittleEndian16(reader, compression_method);
ReadLittleEndian16(reader, last_mod_time);
ReadLittleEndian16(reader, last_mod_date);
ReadLittleEndian32(reader, entry.crc);
ReadLittleEndian32(reader, compressed_size);
ReadLittleEndian32(reader, uncompressed_size);
ReadLittleEndian16(reader, file_name_length);
ReadLittleEndian16(reader, extra_field_length);
ReadLittleEndian16(reader, file_comment_length);
ReadLittleEndian16(reader, ignored16);
ReadLittleEndian16(reader, entry.internal_fa);
ReadLittleEndian32(reader, entry.external_fa);
ReadLittleEndian32(reader, relative_header_offset);
entry.compressed_size = compressed_size;
entry.uncompressed_size = uncompressed_size;
entry.local_header_offset = relative_header_offset;
entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time);
entry.compression_method = static_cast<ZipCompression>(compression_method);
if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) {
return absl::InvalidArgumentError(
"Failed to read ZIP Central Directory Entry (filename)");
}
assert(entry.filename.size() == file_name_length);
if (extra_field_length > 0) {
assert(extra_field_length > 4);
riegeli::LimitingReader extra_reader(
&reader, riegeli::LimitingReaderBase::Options().set_exact_length(
extra_field_length));
extra_reader.SetReadAllHint(true);
if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) {
return status;
}
extra_reader.Seek(extra_field_length);
}
if (file_comment_length > 0 &&
!reader.Read(file_comment_length, entry.comment)) {
return absl::InvalidArgumentError(
"Failed to read ZIP Central Directory Entry (comment)");
}
entry.end_of_header_offset = reader.pos();
entry.estimated_read_size =
std::max(entry.compressed_size, entry.uncompressed_size) +
file_name_length + extra_field_length + ZipEntry::kLocalRecordSize +
(entry.flags & kHasDataDescriptor ? 12 : 0);
return absl::OkStatus();
}
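// Parses one ZIP local file header (signature 0x04034b50) into `entry`,
// including the filename and extra fields. Fields that only appear in the
// central directory (version_madeby, attributes, offsets) are zeroed.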
absl::Status ReadLocalEntry(riegeli::Reader &reader, ZipEntry &entry) {
if (!reader.Pull(ZipEntry::kLocalRecordSize)) {
return absl::InvalidArgumentError(
"ZIP Local Entry insufficient data available");
}
uint32_t signature;
ReadLittleEndian32(reader, signature);
if (signature != 0x04034b50) {
return absl::InvalidArgumentError(
"Failed to read ZIP Local Entry signature");
}
uint16_t ignored16;
uint16_t compression_method;
uint16_t last_mod_time;
uint16_t last_mod_date;
uint32_t uncompressed_size;
uint32_t compressed_size;
uint16_t file_name_length = 0;
uint16_t extra_field_length = 0;
ReadLittleEndian16(reader, ignored16);
ReadLittleEndian16(reader, entry.flags);
ReadLittleEndian16(reader, compression_method);
ReadLittleEndian16(reader, last_mod_time);
ReadLittleEndian16(reader, last_mod_date);
ReadLittleEndian32(reader, entry.crc);
ReadLittleEndian32(reader, compressed_size);
ReadLittleEndian32(reader, uncompressed_size);
ReadLittleEndian16(reader, file_name_length);
ReadLittleEndian16(reader, extra_field_length);
entry.version_madeby = 0;
entry.internal_fa = 0;
entry.external_fa = 0;
entry.local_header_offset = 0;
entry.estimated_read_size = 0;
entry.compressed_size = compressed_size;
entry.uncompressed_size = uncompressed_size;
entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time);
entry.compression_method = static_cast<ZipCompression>(compression_method);
if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) {
return absl::InvalidArgumentError(
"Failed to read ZIP Local Entry (filename)");
}
assert(entry.filename.size() == file_name_length);
entry.end_of_header_offset = reader.pos() + extra_field_length;
if (extra_field_length > 0) {
assert(extra_field_length > 4);
riegeli::LimitingReader extra_reader(
&reader, riegeli::LimitingReaderBase::Options().set_exact_length(
extra_field_length));
extra_reader.SetReadAllHint(true);
if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) {
return status;
}
extra_reader.Seek(extra_field_length);
}
return absl::OkStatus();
}
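// Rejects entries that cannot be decoded here: encrypted entries (including
// AES), compression methods other than store/deflate/bzip2/zstd/xz, and
// directory entries.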
absl::Status ValidateEntryIsSupported(const ZipEntry &entry) {
if (entry.flags & 0x01 ||
entry.flags & (uint16_t{1} << 6) ||
entry.flags & (uint16_t{1} << 13) ||
entry.compression_method == ZipCompression::kAes) {
return absl::InvalidArgumentError(
tensorstore::StrCat("ZIP encryption is not supported"));
}
if (entry.compression_method != ZipCompression::kStore &&
entry.compression_method != ZipCompression::kDeflate &&
entry.compression_method != ZipCompression::kBzip2 &&
entry.compression_method != ZipCompression::kZStd &&
entry.compression_method != ZipCompression::kXZ) {
return absl::InvalidArgumentError(
tensorstore::StrCat("ZIP compression method ", entry.compression_method,
" is not supported"));
}
if (absl::EndsWith(entry.filename, "/")) {
return absl::InvalidArgumentError("ZIP directory entries cannot be read");
}
return absl::OkStatus();
}
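// Returns a reader over the raw (still compressed) bytes of `entry` starting
// at the current position of `reader`. When the entry uses a trailing data
// descriptor, the descriptor is parsed first to fill in the CRC and sizes.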
tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetRawReader(
riegeli::Reader *reader, ZipEntry &entry) {
assert(reader != nullptr);
if (entry.flags & kHasDataDescriptor) {
const auto start_pos = reader->pos();
if (!reader->Skip(entry.compressed_size)) {
return reader->status();
}
static constexpr size_t kZipDataDescriptorSize = 16;
static constexpr size_t kZip64DataDescriptorSize = 24;
if (!reader->Pull(entry.is_zip64 ? kZip64DataDescriptorSize
: kZipDataDescriptorSize)) {
return absl::DataLossError("Failed to read ZIP DataDescriptor");
}
uint32_t signature, crc32;
ReadLittleEndian32(*reader, signature);
ReadLittleEndian32(*reader, crc32);
if (signature != 0x08074b50) {
return absl::DataLossError(absl::StrFormat(
"Failed to read ZIP DataDescriptor signature %08x", signature));
}
if (entry.crc == 0) entry.crc = crc32;
if (entry.is_zip64) {
uint64_t compressed_size, uncompressed_size;
ReadLittleEndian64(*reader, compressed_size);
ReadLittleEndian64(*reader, uncompressed_size);
if (entry.compressed_size == 0) entry.compressed_size = compressed_size;
if (entry.uncompressed_size == 0)
entry.uncompressed_size = uncompressed_size;
} else {
uint32_t compressed_size, uncompressed_size;
ReadLittleEndian32(*reader, compressed_size);
ReadLittleEndian32(*reader, uncompressed_size);
if (entry.compressed_size == 0) {
entry.compressed_size = compressed_size;
}
if (entry.uncompressed_size == 0) {
entry.uncompressed_size = uncompressed_size;
}
}
if | #include "tensorstore/internal/compression/zip_details.h"
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/string_reader.h"
#include "tensorstore/internal/riegeli/find.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal::FindFirst;
using ::tensorstore::internal::StartsWith;
using ::tensorstore::internal_zip::kCentralHeaderLiteral;
using ::tensorstore::internal_zip::kEOCDLiteral;
using ::tensorstore::internal_zip::kLocalHeaderLiteral;
using ::tensorstore::internal_zip::ReadCentralDirectoryEntry;
using ::tensorstore::internal_zip::ReadEOCD;
using ::tensorstore::internal_zip::ReadEOCD64Locator;
using ::tensorstore::internal_zip::ReadLocalEntry;
using ::tensorstore::internal_zip::TryReadFullEOCD;
using ::tensorstore::internal_zip::ZipCompression;
using ::tensorstore::internal_zip::ZipEntry;
using ::tensorstore::internal_zip::ZipEOCD;
using ::tensorstore::internal_zip::ZipEOCD64Locator;
using ::tensorstore::internal_zip::kCentralHeaderLiteral;
using ::tensorstore::internal_zip::kEOCD64Literal;
using ::tensorstore::internal_zip::kEOCD64LocatorLiteral;
using ::tensorstore::internal_zip::kEOCDLiteral;
using ::tensorstore::internal_zip::kLocalHeaderLiteral;
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
absl::Cord GetTestZipFileData() {
ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
absl::Cord filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(
riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
ABSL_CHECK_EQ(filedata.size(), 319482);
return filedata;
}
static constexpr unsigned char kMinimalZip[] = {
0x50, 0x4b, 0x5, 0x6,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0};
static constexpr unsigned char kZip64OneEmptyFile[] = {
0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x72,
0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x01, 0x00, 0x14, 0x00, 0x2d, 0x01, 0x00, 0x10, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x61, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x4f, 0x72, 0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0x02, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x80, 0x11, 0x00, 0x00, 0x00, 0x00, 0x2d,
0x50, 0x4b, 0x06, 0x06, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x50, 0x4b, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00,
0x2f, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static constexpr unsigned char kZipTest2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54,
0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x04, 0x00, 0x64, 0x00, 0x14, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98,
0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x9a,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00,
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d,
0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00,
0x00, 0x00, 0x0d, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69,
0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03,
0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00,
0xe8, 0x03, 0x64, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55,
0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x3c, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0x09,
0x15, 0xe4, 0x41, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,
0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x77, 0x00, 0x00, 0x00, 0x74, 0x65,
0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55,
0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00,
0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
0xca, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
};
template <size_t N>
std::string_view StringViewOf(const unsigned char (&str)[N]) {
return std::string_view(reinterpret_cast<const char*>(str), N);
}
TEST(ZipDetailsTest, DecodeEOCD) {
riegeli::StringReader string_reader(StringViewOf(kMinimalZip));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
ZipEOCD eocd;
ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
EXPECT_EQ(eocd.num_entries, 0);
EXPECT_EQ(eocd.cd_size, 0);
EXPECT_EQ(eocd.cd_offset, 0);
}
TEST(ZipDetailsTest, ReadEOCDZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
ZipEOCD eocd;
ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
EXPECT_EQ(eocd.num_entries, 1);
EXPECT_EQ(eocd.cd_size, 47);
EXPECT_EQ(eocd.cd_offset, 53);
}
TEST(ZipDetailsTest, ReadEOCD6LocatorZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64LocatorLiteral)));
ZipEOCD64Locator eocd64_locator;
ASSERT_THAT(ReadEOCD64Locator(string_reader, eocd64_locator),
::tensorstore::IsOk());
EXPECT_EQ(eocd64_locator.disk_number_with_cd, 0);
EXPECT_EQ(eocd64_locator.cd_offset, 100);
}
TEST(ZipDetailsTest, ReadEOCD64Zip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal)));
EXPECT_EQ(100, string_reader.pos());
ZipEOCD eocd64;
ASSERT_THAT(ReadEOCD64(string_reader, eocd64), ::tensorstore::IsOk());
EXPECT_EQ(eocd64.num_entries, 1);
EXPECT_EQ(eocd64.cd_size, 47);
EXPECT_EQ(eocd64.cd_offset, 53);
}
TEST(ZipDetailsTest, TryReadFullEOCDZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal)));
EXPECT_EQ(100, string_reader.pos());
ZipEOCD eocd64;
ASSERT_THAT(TryReadFullEOCD(string_reader, eocd64, 0),
::testing::VariantWith<absl::Status>(::tensorstore::IsOk()));
EXPECT_EQ(eocd64.num_entries, 1);
EXPECT_EQ(eocd64.cd_size, 47);
EXPECT_EQ(eocd64.cd_offset, 53);
}
TEST(ZipDetailsTest, ReadCentralHeaderZip64) {
riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_EQ(53, string_reader.pos());
ZipEntry central_header;
ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, central_header),
::tensorstore::IsOk());
EXPECT_EQ(central_header.version_madeby, 798);
EXPECT_EQ(central_header.flags, 0);
EXPECT_EQ(central_header.compression_method, ZipCompression::kStore);
EXPECT_EQ(central_header.crc, 3723141383);
EXPECT_EQ(central_header.compressed_size, 2);
EXPECT_EQ(central_header.uncompressed_size, 2);
EXPECT_EQ(central_header.internal_fa, 1);
EXPECT_EQ(central_header.external_fa, 293601280);
EXPECT_EQ(central_header.local_header_offset, 0);
EXPECT_EQ(central_header.filename, "-");
EXPECT_EQ(central_header.comment, "");
EXPECT_GT(central_header.mtime, absl::UnixEpoch());
}
TEST(ZipDetailsTest, ReadLocalHeaderZip64) {
riegeli::StringReader string_reader(
reinterpret_cast<const char*>(kZip64OneEmptyFile),
sizeof(kZip64OneEmptyFile));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kLocalHeaderLiteral)));
ZipEntry local_header;
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.version_madeby, 0);
EXPECT_EQ(local_header.flags, 0);
EXPECT_EQ(local_header.compression_method, ZipCompression::kStore);
EXPECT_EQ(local_header.crc, 3723141383);
EXPECT_EQ(local_header.compressed_size, 2);
EXPECT_EQ(local_header.uncompressed_size, 2);
EXPECT_EQ(local_header.internal_fa, 0);
EXPECT_EQ(local_header.external_fa, 0);
EXPECT_EQ(local_header.local_header_offset, 0);
EXPECT_EQ(local_header.filename, "-");
EXPECT_EQ(local_header.comment, "");
EXPECT_GT(local_header.mtime, absl::UnixEpoch());
}
TEST(ZipDetailsTest, Decode) {
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2));
EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral)));
ZipEOCD eocd;
ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk());
EXPECT_EQ(eocd.num_entries, 3);
EXPECT_EQ(eocd.cd_size, 202);
EXPECT_EQ(eocd.cd_offset, 188);
string_reader.Seek(eocd.cd_offset);
std::vector<ZipEntry> central_headers;
for (size_t i = 0; i < eocd.num_entries; ++i) {
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kCentralHeaderLiteral)))
<< i;
ZipEntry header;
ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, header),
::tensorstore::IsOk());
central_headers.push_back(std::move(header));
}
std::vector<ZipEntry> local_headers;
for (const auto& header : central_headers) {
ZipEntry local_header;
string_reader.Seek(header.local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
local_headers.push_back(std::move(local_header));
absl::Cord data;
string_reader.Read(local_headers.back().compressed_size, data);
}
ASSERT_THAT(local_headers.size(), 3);
for (size_t i = 0; i < local_headers.size(); ++i) {
EXPECT_EQ(local_headers[i].flags, central_headers[i].flags);
EXPECT_EQ(local_headers[i].compression_method,
central_headers[i].compression_method);
EXPECT_EQ(local_headers[i].crc, central_headers[i].crc);
EXPECT_EQ(local_headers[i].compressed_size,
central_headers[i].compressed_size);
EXPECT_EQ(local_headers[i].uncompressed_size,
central_headers[i].uncompressed_size);
EXPECT_EQ(local_headers[i].filename, central_headers[i].filename);
}
}
struct ZipDirectory {
ZipEOCD eocd;
std::vector<ZipEntry> entries;
};
absl::Status ReadDirectory(riegeli::Reader& reader, ZipDirectory& directory) {
int64_t initial_pos = reader.pos();
auto response =
tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, -1);
if (std::holds_alternative<int64_t>(response)) {
reader.Seek(initial_pos);
response =
tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, 0);
}
if (auto* status = std::get_if<absl::Status>(&response);
status != nullptr && !status->ok()) {
return std::move(*status);
}
if (std::holds_alternative<int64_t>(response)) {
return absl::InternalError("ZIP incomplete");
}
reader.Seek(directory.eocd.cd_offset);
std::vector<ZipEntry> central_headers;
for (size_t i = 0; i < directory.eocd.num_entries; ++i) {
ZipEntry header{};
if (auto entry_status = ReadCentralDirectoryEntry(reader, header);
!entry_status.ok()) {
return entry_status;
}
directory.entries.push_back(std::move(header));
}
return absl::OkStatus();
}
TEST(ZipDetailsTest, ReadDirectory) {
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2),
sizeof(kZipTest2));
ZipDirectory dir;
EXPECT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
std::vector<ZipEntry> local_headers;
for (const auto& header : dir.entries) {
ZipEntry local_header;
string_reader.Seek(header.local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
local_headers.push_back(std::move(local_header));
}
EXPECT_THAT(local_headers.size(), 3);
for (size_t i = 0; i < local_headers.size(); ++i) {
EXPECT_EQ(local_headers[i].flags, dir.entries[i].flags);
EXPECT_EQ(local_headers[i].compression_method,
dir.entries[i].compression_method);
EXPECT_EQ(local_headers[i].crc, dir.entries[i].crc);
EXPECT_EQ(local_headers[i].compressed_size, dir.entries[i].compressed_size);
EXPECT_EQ(local_headers[i].uncompressed_size,
dir.entries[i].uncompressed_size);
EXPECT_EQ(local_headers[i].filename, dir.entries[i].filename);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_headers[0]));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data, "test\n");
EXPECT_EQ(data.size(), local_headers[0].uncompressed_size);
}
TEST(ZipDetailsTest, Xz) {
static constexpr unsigned char kXZ[] = {
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a,
0x36, 0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0xfd, 0x37, 0x7a,
0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41, 0x02, 0x00, 0x21,
0x01, 0x00, 0x00, 0x00, 0x00, 0x37, 0x27, 0x97, 0xd6, 0xe0, 0x00, 0x3f,
0x00, 0x11, 0x5e, 0x00, 0x30, 0xec, 0xbd, 0xa0, 0xa3, 0x19, 0xd7, 0x9c,
0xf2, 0xec, 0x93, 0x6b, 0xfe, 0x81, 0xb3, 0x7a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x25, 0x40, 0x5c, 0x24, 0xa9, 0xbe, 0x06, 0x72, 0x9e,
0x7a, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a, 0x50, 0x4b, 0x01,
0x02, 0x14, 0x00, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a, 0x36,
0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d,
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b,
0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00,
0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kXZ),
sizeof(kXZ));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kXZ);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc"
"cc\r\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Zstd) {
static constexpr unsigned char kZStd[] = {
0x50, 0x4b, 0x03, 0x04, 0x3f, 0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69,
0xf2, 0x50, 0x28, 0xe2, 0xde, 0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x28, 0xb5, 0x2f,
0xfd, 0x20, 0x40, 0xbd, 0x00, 0x00, 0x68, 0x61, 0x61, 0x0d, 0x0a, 0x62,
0x0d, 0x0a, 0x61, 0x0d, 0x0a, 0x63, 0x0d, 0x0a, 0x04, 0x10, 0x00, 0xc7,
0x38, 0xc6, 0x31, 0x38, 0x2c, 0x50, 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x3f,
0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69, 0xf2, 0x50, 0x28, 0xe2, 0xde,
0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65,
0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x4d, 0x00,
0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kZStd));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kZStd);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc"
"cc\r\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Bzip2) {
static constexpr unsigned char kBzip2[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74,
0x45, 0x3c, 0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x42, 0x5a, 0x68,
0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x03, 0x64, 0xc8, 0x04, 0x00,
0x00, 0x07, 0x41, 0x00, 0x00, 0x10, 0x38, 0x00, 0x20, 0x00, 0x30, 0xcd,
0x34, 0x12, 0x6a, 0x7a, 0x95, 0x10, 0x26, 0x4e, 0xcd, 0x9f, 0x17, 0x72,
0x45, 0x38, 0x50, 0x90, 0x03, 0x64, 0xc8, 0x04, 0x50, 0x4b, 0x01, 0x02,
0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74, 0x45, 0x3c,
0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfd, 0x81, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05,
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00,
0x00, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kBzip2));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kBzip2);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data,
"aaaaaaaaaaaaaa\nbbbbbbbbbbbbbb\naaaaaaaaaaaaaa\ncccccccccccccc\n");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(ZipDetailsTest, Deflate) {
static constexpr unsigned char kDeflate[] = {
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56, 0x5e,
0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00,
0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72, 0x73, 0x74, 0x73,
0x65, 0x63, 0x6f, 0x6e, 0x64, 0x4b, 0xcb, 0x2c, 0x2a, 0x2e, 0x29, 0x48,
0x2c, 0x2a, 0x29, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0x01, 0xb1, 0x00, 0x50,
0x4b, 0x01, 0x02, 0x1e, 0x03, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56,
0x5e, 0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13,
0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72,
0x73, 0x74, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x50, 0x4b, 0x05, 0x06,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00, 0x00, 0x00,
0x3b, 0x00, 0x00, 0x00, 0x00, 0x00,
};
riegeli::StringReader string_reader(StringViewOf(kDeflate));
ZipDirectory dir;
ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk());
EXPECT_THAT(dir.entries.size(), ::testing::Gt(0));
ZipEntry local_header;
string_reader.Seek(dir.entries[0].local_header_offset);
EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral)));
ASSERT_THAT(ReadLocalEntry(string_reader, local_header),
::tensorstore::IsOk());
EXPECT_EQ(local_header.compression_method, ZipCompression::kDeflate);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader,
GetReader(&string_reader, local_header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data, "firstpartsecondpart");
EXPECT_EQ(data.size(), local_header.uncompressed_size);
}
TEST(TestdataTest, HeaderPositions) {
riegeli::CordReader reader(GetTestZipFileData());
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x19FA6);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x33F4D);
reader.Seek(0);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DEF3);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DF43);
reader.Skip(4);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DF94);
reader.Seek(0);
EXPECT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral)));
EXPECT_THAT(reader.pos(), 0x4DFE4);
}
TEST(TestdataTest, LocalHeaderEntry) {
riegeli::CordReader reader(GetTestZipFileData());
ZipEntry header;
EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0);
ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk());
EXPECT_THAT(header.version_madeby, 0);
EXPECT_THAT(header.flags, 0x2);
EXPECT_THAT(header.compression_method, ZipCompression::kDeflate);
EXPECT_THAT(header.crc, 0x94EE1E3E);
EXPECT_THAT(header.compressed_size, 0x00019F62);
EXPECT_THAT(header.uncompressed_size, 0x00019F6F);
EXPECT_THAT(header.internal_fa, 0);
EXPECT_THAT(header.external_fa, 0);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.end_of_header_offset, 68);
EXPECT_THAT(header.filename, "data/a.png");
EXPECT_THAT(header.comment, "");
EXPECT_THAT(header.is_zip64, false);
}
TEST(TestdataTest, CentralHeaderEntry) {
riegeli::CordReader reader(GetTestZipFileData());
reader.Seek(0x4DEF3);
ASSERT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral)));
EXPECT_THAT(reader.pos(), 0x4DEF3);
ZipEntry header{};
ASSERT_THAT(ReadCentralDirectoryEntry(reader, header), ::tensorstore::IsOk());
EXPECT_THAT(header.flags, 0x2);
EXPECT_THAT(header.compression_method, ZipCompression::kDeflate);
EXPECT_THAT(header.crc, 0x94EE1E3E);
EXPECT_THAT(header.compressed_size, 0x00019F62);
EXPECT_THAT(header.uncompressed_size, 0x00019F6F);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.end_of_header_offset, 24);
EXPECT_THAT(header.filename, "data/a.png");
EXPECT_THAT(header.comment, "");
EXPECT_THAT(header.is_zip64, false);
EXPECT_THAT(header.version_madeby, 0x031E);
EXPECT_THAT(header.internal_fa, 0);
EXPECT_THAT(header.external_fa, 0x81240001);
EXPECT_THAT(header.local_header_offset, 0);
EXPECT_THAT(header.estimated_read_size, 106415);
}
TEST(TestdataTest, EOCD) {
riegeli::CordReader reader(GetTestZipFileData());
ASSERT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral)));
EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral)));
EXPECT_THAT(reader.pos(), 0x4DFE4);
::tensorstore::internal_zip::ZipEOCD eocd{};
ASSERT_THAT(ReadEOCD(reader, eocd), ::tensorstore::IsOk());
EXPECT_THAT(eocd.num_entries, 3);
EXPECT_THAT(eocd.cd_size, 0x000000F1);
EXPECT_THAT(eocd.cd_offset, 0x0004DEF3);
EXPECT_THAT(eocd.comment, "");
}
TEST(TestdataTest, FileData) {
riegeli::CordReader reader(GetTestZipFileData());
ZipEntry header;
ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk());
EXPECT_THAT(reader.pos(), 0x0044);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entry_reader, tensorstore::internal_zip::GetReader(&reader, header));
std::string data;
EXPECT_THAT(riegeli::ReadAll(*entry_reader, data), ::tensorstore::IsOk());
EXPECT_EQ(data.size(), header.uncompressed_size);
}
} |
671 | cpp | google/tensorstore | zlib | tensorstore/internal/compression/zlib.cc | tensorstore/internal/compression/zlib_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_ZLIB_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_ZLIB_H_
#include <cstddef>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace zlib {
struct Options {
int level = -1;
bool use_gzip_header = false;
};
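// Appends the zlib- (or gzip-) compressed encoding of `input` to `*output`.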
void Encode(const absl::Cord& input, absl::Cord* output,
const Options& options);
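// Appends the decompressed form of `input` to `*output`; returns
// InvalidArgumentError if `input` is not valid compressed data.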
absl::Status Decode(const absl::Cord& input, absl::Cord* output,
bool use_gzip_header);
}
}
#endif
#include "tensorstore/internal/compression/zlib.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/internal/compression/cord_stream_manager.h"
#include <zlib.h>
namespace tensorstore {
namespace zlib {
namespace {
struct InflateOp {
static int Init(z_stream* s, [[maybe_unused]] int level, int header_option) {
    return inflateInit2(s, /*windowBits=*/15 + header_option);
}
static int Process(z_stream* s, int flags) { return inflate(s, flags); }
static int Destroy(z_stream* s) { return inflateEnd(s); }
static constexpr bool kDataErrorPossible = true;
};
struct DeflateOp {
static int Init(z_stream* s, int level, int header_option) {
    return deflateInit2(s, level, Z_DEFLATED,
                        /*windowBits=*/15 + header_option,
                        /*memLevel=*/8, Z_DEFAULT_STRATEGY);
}
static int Process(z_stream* s, int flags) { return deflate(s, flags); }
static int Destroy(z_stream* s) { return deflateEnd(s); }
static constexpr bool kDataErrorPossible = false;
};
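// Runs `input` through zlib (inflate or deflate, selected by `Op`) in 16 KiB
// chunks via CordStreamManager, appending the result to `*output`. Corrupt
// input surfaces as InvalidArgumentError when decoding; states that should be
// unreachable abort via ABSL_CHECK.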
template <typename Op>
absl::Status ProcessZlib(const absl::Cord& input, absl::Cord* output, int level,
bool use_gzip_header) {
z_stream s = {};
internal::CordStreamManager<z_stream, 16 * 1024>
stream_manager(s, input, output);
  // Adding 16 to windowBits makes zlib emit/expect a gzip header and trailer.
  const int header_option = use_gzip_header ? 16 : 0;
int err = Op::Init(&s, level, header_option);
if (err != Z_OK) {
ABSL_CHECK(false);
}
struct StreamDestroyer {
z_stream* s;
~StreamDestroyer() { Op::Destroy(s); }
} stream_destroyer{&s};
while (true) {
const bool input_complete = stream_manager.FeedInputAndOutputBuffers();
err = Op::Process(&s, input_complete ? Z_FINISH : Z_NO_FLUSH);
const bool made_progress = stream_manager.HandleOutput();
if (err == Z_OK) continue;
if (err == Z_BUF_ERROR && made_progress) continue;
break;
}
switch (err) {
case Z_STREAM_END:
if (!stream_manager.has_input_remaining()) {
return absl::OkStatus();
}
[[fallthrough]];
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_BUF_ERROR:
if (!Op::kDataErrorPossible) {
ABSL_CHECK(false);
}
return absl::InvalidArgumentError("Error decoding zlib-compressed data");
default:
ABSL_CHECK(false);
}
ABSL_UNREACHABLE();
}
}
void Encode(const absl::Cord& input, absl::Cord* output,
const Options& options) {
ProcessZlib<DeflateOp>(input, output, options.level, options.use_gzip_header)
.IgnoreError();
}
absl::Status Decode(const absl::Cord& input, absl::Cord* output,
bool use_gzip_header) {
return ProcessZlib<InflateOp>(input, output, 0, use_gzip_header);
}
}
} | #include "tensorstore/internal/compression/zlib.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace zlib = tensorstore::zlib;
class ZlibCompressorTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(ZlibCompressorTestCases, ZlibCompressorTest,
::testing::Values(false, true));
TEST_P(ZlibCompressorTest, SmallRoundtrip) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(
zlib::Decode(encode_result.Subcord(3, encode_result.size() - 3),
&decode_result, options.use_gzip_header));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST_P(ZlibCompressorTest, SmallRoundtripFragmented) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(
zlib::Decode(absl::MakeFragmentedCord(encode_result_fragments),
&decode_result, options.use_gzip_header));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST_P(ZlibCompressorTest, LargeRoundtrip) {
const bool use_gzip_header = GetParam();
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
zlib::Options options{6, use_gzip_header};
absl::Cord encode_result, decode_result;
zlib::Encode(absl::Cord(input), &encode_result, options);
ASSERT_EQ(absl::OkStatus(), zlib::Decode(encode_result, &decode_result,
options.use_gzip_header));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, NonDefaultLevel) {
const bool use_gzip_header = GetParam();
zlib::Options options1{
0, use_gzip_header};
zlib::Options options2{9, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
zlib::Encode(input, &encode_result1, options1);
zlib::Encode(input, &encode_result2, options2);
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(
zlib::Decode(encode_result2, &decode_result, options2.use_gzip_header));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, DecodeCorruptData) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
options.use_gzip_header),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted.resize(corrupted.size() - 1);
EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
options.use_gzip_header),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} |
672 | cpp | google/tensorstore | neuroglancer_compressed_segmentation | tensorstore/internal/compression/neuroglancer_compressed_segmentation.cc | tensorstore/internal/compression/neuroglancer_compressed_segmentation_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_NEUROGLANCER_COMPRESSED_SEGMENTATION_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_NEUROGLANCER_COMPRESSED_SEGMENTATION_H_
#include <cstdint>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
namespace tensorstore {
namespace neuroglancer_compressed_segmentation {
template <class Label>
using EncodedValueCache = absl::flat_hash_map<std::vector<Label>, uint32_t>;
template <typename Label>
void EncodeBlock(const Label* input, const std::ptrdiff_t input_shape[3],
const std::ptrdiff_t input_byte_strides[3],
const std::ptrdiff_t block_shape[3], size_t base_offset,
size_t* encoded_bits_output, size_t* table_offset_output,
EncodedValueCache<Label>* cache, std::string* output);
template <typename Label>
void EncodeChannel(const Label* input, const std::ptrdiff_t input_shape[3],
const std::ptrdiff_t input_byte_strides[3],
const std::ptrdiff_t block_shape[3], std::string* output);
template <typename Label>
void EncodeChannels(const Label* input, const std::ptrdiff_t input_shape[3 + 1],
const std::ptrdiff_t input_byte_strides[3 + 1],
const std::ptrdiff_t block_shape[3], std::string* output);
template <typename Label>
bool DecodeBlock(size_t encoded_bits, const char* encoded_input,
const char* table_input, size_t table_size,
const std::ptrdiff_t block_shape[3],
const std::ptrdiff_t output_shape[3],
const std::ptrdiff_t output_byte_strides[3], Label* output);
template <typename Label>
bool DecodeChannel(std::string_view input, const std::ptrdiff_t block_shape[3],
const std::ptrdiff_t output_shape[3],
const std::ptrdiff_t output_byte_strides[3], Label* output);
template <typename Label>
bool DecodeChannels(std::string_view input, const std::ptrdiff_t block_shape[3],
const std::ptrdiff_t output_shape[3 + 1],
const std::ptrdiff_t output_byte_strides[3 + 1],
Label* output);
}
}
#endif
#include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/container/flat_hash_map.h"
namespace tensorstore {
namespace neuroglancer_compressed_segmentation {
constexpr size_t kBlockHeaderSize = 2;
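// Writes the two-word block header: word 0 holds the table offset in its low
// 24 bits and the per-value bit width in its high 8 bits; word 1 holds the
// offset of the packed values. Offsets are in 32-bit words.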
void WriteBlockHeader(size_t encoded_value_base_offset,
size_t table_base_offset, size_t encoding_bits,
void* output) {
absl::little_endian::Store32(output,
table_base_offset | (encoding_bits << 24));
absl::little_endian::Store32(static_cast<char*>(output) + 4,
encoded_value_base_offset);
}
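// Encodes a single block: gathers the distinct labels, chooses the smallest
// power-of-two bit width that can index them (0 for a constant block), appends
// the packed per-voxel indices to `*output`, and appends the label table
// unless an identical table is already present in `cache`. The reported
// offsets are in 32-bit words relative to `base_offset`.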
template <typename Label>
void EncodeBlock(const Label* input, const ptrdiff_t input_shape[3],
const ptrdiff_t input_byte_strides[3],
const ptrdiff_t block_shape[3], size_t base_offset,
size_t* encoded_bits_output, size_t* table_offset_output,
EncodedValueCache<Label>* cache, std::string* output) {
if (input_shape[0] == 0 && input_shape[1] == 0 && input_shape[2] == 0) {
*encoded_bits_output = 0;
*table_offset_output = 0;
return;
}
constexpr size_t num_32bit_words_per_label = sizeof(Label) / 4;
absl::flat_hash_map<Label, uint32_t> seen_values;
std::vector<Label> seen_values_inv;
const auto ForEachElement = [&](auto func) {
auto* input_z = reinterpret_cast<const char*>(input);
for (ptrdiff_t z = 0; z < input_shape[0]; ++z) {
auto* input_y = input_z;
for (ptrdiff_t y = 0; y < input_shape[1]; ++y) {
auto* input_x = input_y;
for (ptrdiff_t x = 0; x < input_shape[2]; ++x) {
func(z, y, x, *reinterpret_cast<const Label*>(input_x));
input_x += input_byte_strides[2];
}
input_y += input_byte_strides[1];
}
input_z += input_byte_strides[0];
}
};
Label previous_value = input[0] + 1;
ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
if (value != previous_value) {
previous_value = value;
if (seen_values.emplace(value, 0).second) {
seen_values_inv.push_back(value);
}
}
});
std::sort(seen_values_inv.begin(), seen_values_inv.end());
for (size_t i = 0; i < seen_values_inv.size(); ++i) {
seen_values[seen_values_inv[i]] = static_cast<uint32_t>(i);
}
size_t encoded_bits = 0;
if (seen_values.size() != 1) {
encoded_bits = 1;
while ((size_t(1) << encoded_bits) < seen_values.size()) {
encoded_bits *= 2;
}
}
*encoded_bits_output = encoded_bits;
const size_t encoded_size_32bits =
(encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] + 31) /
32;
const size_t encoded_value_base_offset = output->size();
assert((encoded_value_base_offset - base_offset) % 4 == 0);
size_t elements_to_write = encoded_size_32bits;
bool write_table;
{
auto it = cache->find(seen_values_inv);
if (it == cache->end()) {
write_table = true;
elements_to_write += seen_values.size() * num_32bit_words_per_label;
*table_offset_output =
(encoded_value_base_offset - base_offset) / 4 + encoded_size_32bits;
} else {
write_table = false;
*table_offset_output = it->second;
}
}
output->resize(encoded_value_base_offset + elements_to_write * 4);
char* output_ptr = output->data() + encoded_value_base_offset;
ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
uint32_t index = seen_values.at(value);
size_t output_offset = x + block_shape[2] * (y + block_shape[1] * z);
void* cur_ptr = output_ptr + output_offset * encoded_bits / 32 * 4;
absl::little_endian::Store32(
cur_ptr, absl::little_endian::Load32(cur_ptr) |
(index << (output_offset * encoded_bits % 32)));
});
if (write_table) {
output_ptr =
output->data() + encoded_value_base_offset + encoded_size_32bits * 4;
for (auto value : seen_values_inv) {
for (size_t word_i = 0; word_i < num_32bit_words_per_label; ++word_i) {
absl::little_endian::Store32(
output_ptr + word_i * 4,
static_cast<uint32_t>(value >> (32 * word_i)));
}
output_ptr += num_32bit_words_per_label * 4;
}
cache->emplace(seen_values_inv,
static_cast<uint32_t>(*table_offset_output));
}
}
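// Encodes one channel: reserves a block index of kBlockHeaderSize words per
// grid block, then encodes each block with EncodeBlock, sharing label tables
// between blocks through an EncodedValueCache.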
template <class Label>
void EncodeChannel(const Label* input, const ptrdiff_t input_shape[3],
const ptrdiff_t input_byte_strides[3],
const ptrdiff_t block_shape[3], std::string* output) {
EncodedValueCache<Label> cache;
const size_t base_offset = output->size();
ptrdiff_t grid_shape[3];
size_t block_index_size = kBlockHeaderSize;
for (size_t i = 0; i < 3; ++i) {
grid_shape[i] = (input_shape[i] + block_shape[i] - 1) / block_shape[i];
block_index_size *= grid_shape[i];
}
output->resize(base_offset + block_index_size * 4);
ptrdiff_t block[3];
for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
const size_t block_offset =
block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
ptrdiff_t input_block_shape[3];
ptrdiff_t input_offset = 0;
for (size_t i = 0; i < 3; ++i) {
auto pos = block[i] * block_shape[i];
input_block_shape[i] = std::min(block_shape[i], input_shape[i] - pos);
input_offset += pos * input_byte_strides[i];
}
const size_t encoded_value_base_offset =
(output->size() - base_offset) / 4;
size_t encoded_bits, table_offset;
EncodeBlock(reinterpret_cast<const Label*>(
reinterpret_cast<const char*>(input) + input_offset),
input_block_shape, input_byte_strides, block_shape,
base_offset, &encoded_bits, &table_offset, &cache, output);
WriteBlockHeader(
encoded_value_base_offset, table_offset, encoded_bits,
output->data() + base_offset + block_offset * kBlockHeaderSize * 4);
}
}
}
}
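// Encodes several channels: writes one 32-bit offset per channel followed by
// each channel's encoding produced by EncodeChannel.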
template <class Label>
void EncodeChannels(const Label* input, const ptrdiff_t input_shape[3 + 1],
const ptrdiff_t input_byte_strides[3 + 1],
const ptrdiff_t block_shape[3], std::string* output) {
const size_t base_offset = output->size();
output->resize(base_offset + input_shape[0] * 4);
for (ptrdiff_t channel_i = 0; channel_i < input_shape[0]; ++channel_i) {
absl::little_endian::Store32(output->data() + base_offset + channel_i * 4,
(output->size() - base_offset) / 4);
EncodeChannel(
reinterpret_cast<const Label*>(reinterpret_cast<const char*>(input) +
input_byte_strides[0] * channel_i),
input_shape + 1, input_byte_strides + 1, block_shape, output);
}
}
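// Decodes the two-word block header written by WriteBlockHeader.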
void ReadBlockHeader(const void* header, size_t* encoded_value_base_offset,
size_t* table_base_offset, size_t* encoding_bits) {
auto h = absl::little_endian::Load64(header);
*table_base_offset = h & 0xffffff;
*encoding_bits = (h >> 24) & 0xff;
*encoded_value_base_offset = (h >> 32) & 0xffffff;
}
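// Decodes one block produced by EncodeBlock. Returns false if the label table
// is empty for a constant block or a decoded index falls outside the table.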
template <typename Label>
bool DecodeBlock(size_t encoded_bits, const char* encoded_input,
const char* table_input, size_t table_size,
const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3],
const ptrdiff_t output_byte_strides[3], Label* output) {
const auto for_each_position = [&](auto callback) {
auto* output_z = reinterpret_cast<char*>(output);
for (ptrdiff_t z = 0; z < output_shape[0]; ++z) {
auto* output_y = output_z;
for (ptrdiff_t y = 0; y < output_shape[1]; ++y) {
auto* output_x = output_y;
for (ptrdiff_t x = 0; x < output_shape[2]; ++x) {
auto& label = *reinterpret_cast<Label*>(output_x);
if (!callback(label, z, y, x)) return false;
output_x += output_byte_strides[2];
}
output_y += output_byte_strides[1];
}
output_z += output_byte_strides[0];
}
return true;
};
const auto read_label = [&](size_t index) -> Label {
if constexpr (sizeof(Label) == 4) {
return absl::little_endian::Load32(table_input + index * sizeof(Label));
} else {
return absl::little_endian::Load64(table_input + index * sizeof(Label));
}
};
if (encoded_bits == 0) {
if (table_size == 0) return false;
const Label label = read_label(0);
return for_each_position(
[&](Label& output_label, ptrdiff_t z, ptrdiff_t y, ptrdiff_t x) {
output_label = label;
return true;
});
}
const uint32_t encoded_value_mask = (1U << encoded_bits) - 1;
return for_each_position([&](Label& output_label, ptrdiff_t z, ptrdiff_t y,
ptrdiff_t x) {
size_t encoded_offset = x + block_shape[2] * (y + block_shape[1] * z);
auto index = absl::little_endian::Load32(
encoded_input + encoded_offset * encoded_bits / 32 * 4) >>
(encoded_offset * encoded_bits % 32) &
encoded_value_mask;
if (index >= table_size) return false;
output_label = read_label(index);
return true;
});
}
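// Decodes one channel produced by EncodeChannel, validating every block header
// and offset against the bounds of `input`. Returns false on malformed data.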
template <typename Label>
bool DecodeChannel(std::string_view input, const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3],
const ptrdiff_t output_byte_strides[3], Label* output) {
if ((input.size() % 4) != 0) return false;
ptrdiff_t grid_shape[3];
size_t block_index_size = kBlockHeaderSize;
for (size_t i = 0; i < 3; ++i) {
grid_shape[i] = (output_shape[i] + block_shape[i] - 1) / block_shape[i];
block_index_size *= grid_shape[i];
}
if (input.size() / 4 < block_index_size) {
return false;
}
ptrdiff_t block[3];
for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
const size_t block_offset =
block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
ptrdiff_t output_block_shape[3];
ptrdiff_t output_offset = 0;
for (size_t i = 0; i < 3; ++i) {
auto pos = block[i] * block_shape[i];
output_block_shape[i] =
std::min(block_shape[i], output_shape[i] - pos);
output_offset += pos * output_byte_strides[i];
}
size_t encoded_value_base_offset;
size_t encoded_bits, table_offset;
ReadBlockHeader(input.data() + block_offset * kBlockHeaderSize * 4,
&encoded_value_base_offset, &table_offset,
&encoded_bits);
if (encoded_bits > 32 || (encoded_bits & (encoded_bits - 1)) != 0) {
return false;
}
if (encoded_value_base_offset > input.size() / 4 ||
table_offset > input.size() / 4) {
return false;
}
const size_t encoded_size_32bits =
(encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] +
31) /
32;
if ((encoded_value_base_offset + encoded_size_32bits) * 4 >
input.size()) {
return false;
}
auto* block_output = reinterpret_cast<Label*>(
reinterpret_cast<char*>(output) + output_offset);
const char* encoded_input =
input.data() + encoded_value_base_offset * 4;
const char* table_input = input.data() + table_offset * 4;
const size_t table_size =
(input.size() - table_offset * 4) / sizeof(Label);
if (!DecodeBlock(encoded_bits, encoded_input, table_input, table_size,
block_shape, output_block_shape, output_byte_strides,
block_output)) {
return false;
}
}
}
}
return true;
}
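// Decodes a multi-channel encoding produced by EncodeChannels by reading the
// per-channel offset table and decoding each channel with DecodeChannel.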
template <typename Label>
bool DecodeChannels(std::string_view input, const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3 + 1],
const ptrdiff_t output_byte_strides[3 + 1], Label* output) {
if ((input.size() % 4) != 0) return false;
if (input.size() / 4 < static_cast<size_t>(output_shape[0])) {
return false;
}
for (ptrdiff_t channel_i = 0; channel_i < output_shape[0]; ++channel_i) {
const size_t offset =
absl::little_endian::Load32(input.data() + channel_i * 4);
if (offset > input.size() / 4) {
return false;
}
if (!DecodeChannel(
input.substr(offset * 4), block_shape, output_shape + 1,
output_byte_strides + 1,
reinterpret_cast<Label*>(reinterpret_cast<char*>(output) +
output_byte_strides[0] * channel_i))) {
return false;
}
}
return true;
}
#define DO_INSTANTIATE(Label) \
template void EncodeBlock<Label>( \
const Label* input, const ptrdiff_t input_shape[3], \
const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
size_t base_offset, size_t* encoded_bits_output, \
size_t* table_offset_output, EncodedValueCache<Label>* cache, \
std::string* output); \
template void EncodeChannel<Label>( \
const Label* input, const ptrdiff_t input_shape[3], \
const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
std::string* output); \
template void EncodeChannels<Label>( \
const Label* input, const ptrdiff_t input_shape[3 + 1], \
const ptrdiff_t input_byte_strides[3 + 1], \
const ptrdiff_t block_shape[3], std::string* output); \
template bool DecodeBlock( \
size_t encoded_bits, const char* encoded_input, const char* table_input, \
size_t table_size, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
Label* output); \
template bool DecodeChannel<Label>( \
std::string_view input, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
Label* output); \
template bool DecodeChannels( \
std::string_view input, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3 + 1], \
      const ptrdiff_t output_byte_strides[3 + 1], Label* output);
DO_INSTANTIATE(uint32_t)
DO_INSTANTIATE(uint64_t)
#undef DO_INSTANTIATE
}
} | #include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
namespace {
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodedValueCache;
std::vector<uint32_t> AsVec(std::string_view s) {
EXPECT_EQ(0, s.size() % 4);
std::vector<uint32_t> out(s.size() / 4);
for (size_t i = 0; i < out.size(); ++i) {
out[i] = absl::little_endian::Load32(s.data() + i * 4);
}
return out;
}
std::string FromVec(std::vector<uint32_t> v) {
std::string s;
s.resize(v.size() * 4);
for (size_t i = 0; i < v.size(); ++i) {
absl::little_endian::Store32(s.data() + i * 4, v[i]);
}
return s;
}
template <typename T>
void TestBlockRoundTrip(std::vector<T> input,
const std::ptrdiff_t (&input_shape)[3],
const std::ptrdiff_t (&block_shape)[3],
size_t expected_encoded_bits,
size_t expected_table_offset,
std::vector<uint32_t> expected_output,
EncodedValueCache<T> expected_cache) {
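  // Seed the output with a 3-byte prefix to verify that encoding appends at
  // `initial_offset` rather than overwriting existing bytes.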
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
size_t encoded_bits;
size_t table_offset;
EncodedValueCache<uint64_t> cache;
const size_t initial_offset = output.size();
EncodeBlock(input.data(), input_shape, input_byte_strides, block_shape,
initial_offset, &encoded_bits, &table_offset, &cache, &output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_EQ(expected_encoded_bits, encoded_bits);
EXPECT_EQ(expected_table_offset, table_offset);
EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
EXPECT_EQ(expected_cache, cache);
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeBlock(
encoded_bits, output.data() + initial_offset,
output.data() + initial_offset + table_offset * 4,
(output.size() - (initial_offset + table_offset * 4)) / sizeof(T),
block_shape, input_shape, input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
template <typename T>
void TestSingleChannelRoundTrip(std::vector<T> input,
const std::ptrdiff_t (&input_shape)[3],
const std::ptrdiff_t (&block_shape)[3],
std::vector<uint32_t> expected_output) {
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
const size_t initial_offset = output.size();
EncodeChannel(input.data(), input_shape, input_byte_strides, block_shape,
&output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
std::vector<T> decoded_output(input.size());
std::vector<char> output_copy(output.begin() + initial_offset, output.end());
EXPECT_TRUE(DecodeChannel(
std::string_view(output_copy.data(), output_copy.size()), block_shape,
input_shape, input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
template <typename T>
void TestDecodeChannelError(std::string_view input,
const std::ptrdiff_t (&block_shape)[3],
const std::ptrdiff_t (&input_shape)[3]) {
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
std::vector<T> decoded_output(input_shape[0] * input_shape[1] *
input_shape[2]);
EXPECT_FALSE(DecodeChannel(input, block_shape, input_shape,
input_byte_strides, decoded_output.data()));
}
template <typename T>
void TestMultipleChannelsRoundTripBytes(
std::vector<T> input, const std::ptrdiff_t (&input_shape)[4],
const std::ptrdiff_t (&block_shape)[4],
std::vector<unsigned char> expected_output) {
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3],
input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[4] = {
input_shape[1] * input_shape[2] * input_shape[3] * s,
input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
const size_t initial_offset = output.size();
EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
&output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_THAT(output.substr(initial_offset),
::testing::ElementsAreArray(expected_output));
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeChannels(output.substr(initial_offset), block_shape,
input_shape, input_byte_strides,
decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
TEST(EncodeBlockTest, Basic0) {
TestBlockRoundTrip<uint64_t>({3, 3, 3, 3},
{1, 2, 2},
{1, 2, 2},
0,
0,
{3, 0},
{{{3}, 0}});
}
TEST(EncodeBlockTest, Basic1) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 4, 4},
{1, 2, 2},
{1, 2, 2},
1,
1,
{0b1101, 3, 0, 4, 0},
{{{3, 4}, 1}});
}
TEST(EncodeBlockTest, SizeMismatch) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 4, 3},
{1, 2, 2},
{1, 2, 3},
1,
1,
{0b001001, 3, 0, 4, 0},
{{{3, 4}, 1}});
}
TEST(EncodeBlockTest, Basic2) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 5, 4},
{1, 2, 2},
{1, 2, 2},
2,
1,
{0b01100001, 3, 0, 4, 0, 5, 0},
{{{3, 4, 5}, 1}});
}
TEST(EncodeChannelTest, Basic) {
TestSingleChannelRoundTrip<uint64_t>(
{4, 3, 5, 4, 1, 3, 3, 3},
{2, 2, 2},
{1, 2, 2},
{5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0,
0b1110, 1, 0, 3, 0});
}
TEST(EncodeChannelTest, BasicCached) {
TestSingleChannelRoundTrip<uint64_t>(
{
4, 3, 5, 4,
1, 3, 3, 3,
3, 1, 1, 1,
5, 5, 3, 4,
},
{4, 2, 2},
{1, 2, 2},
{
9 | (2 << 24),
8,
16 | (1 << 24),
15,
16 | (1 << 24),
20,
9 | (2 << 24),
21,
0b01100001,
3,
0,
4,
0,
5,
0,
0b1110,
1,
0,
3,
0,
0b00000001,
0b01001010,
});
}
TEST(EncodeChannelTest, BasicCachedZeroBitsAtEnd) {
TestSingleChannelRoundTrip<uint64_t>(
{
3, 3, 3, 3,
3, 3, 3, 3,
3, 3, 3, 3,
3, 3, 3, 3,
},
{4, 2, 2},
{1, 2, 2},
{
8 | (0 << 24),
8,
8 | (0 << 24),
10,
8 | (0 << 24),
10,
8 | (0 << 24),
10,
3,
0,
});
}
TEST(EncodeChannelTest, BasicCached32) {
TestSingleChannelRoundTrip<uint32_t>(
{
4, 3, 5, 4,
1, 3, 3, 3,
3, 1, 1, 1,
5, 5, 3, 4,
},
{4, 2, 2},
{1, 2, 2},
{
9 | (2 << 24),
8,
13 | (1 << 24),
12,
13 | (1 << 24),
15,
9 | (2 << 24),
16,
0b01100001,
3,
4,
5,
0b1110,
1,
3,
0b00000001,
0b01001010,
});
}
TEST(EncodeChannelsTest, Basic1Channel1Block) {
TestMultipleChannelsRoundTripBytes<uint64_t>(
{4, 0, 4, 0},
{1, 1, 2, 2},
{1, 2, 2},
{
1, 0, 0, 0,
3, 0, 0, 1, 2, 0, 0, 0,
0b0101, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
4, 0, 0, 0,
0, 0, 0, 0,
});
}
TEST(DecodeChannelTest, SizeNotMultipleOf4) {
auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
input.resize(input.size() - 1);
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, Truncated) {
auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
input.resize(input.size() - 4);
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, NonPowerOf2EncodedBits) {
auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MoreThan32EncodedBits) {
auto input = FromVec({5 | (33 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MissingBlockHeaders) {
auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24)});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, InvalidEncodedValueOffset) {
auto input = FromVec({5 | (2 << 24), 16, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, InvalidTableOffset) {
auto input = FromVec({16 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MissingEncodedValues) {
auto input = FromVec(
{5 | (2 << 24), 4, 0 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
template <typename T>
void RandomRoundTrip(size_t max_block_size, size_t max_input_size,
size_t max_channels, size_t max_distinct_ids,
size_t num_iterations) {
absl::BitGen gen;
for (size_t iter = 0; iter < num_iterations; ++iter) {
std::ptrdiff_t block_shape[3];
std::ptrdiff_t input_shape[4];
input_shape[0] = absl::Uniform(gen, 1u, max_channels + 1);
for (int i = 0; i < 3; ++i) {
block_shape[i] = absl::Uniform(gen, 1u, max_block_size + 1);
input_shape[i + 1] = absl::Uniform(gen, 1u, max_input_size + 1);
}
std::vector<T> input(input_shape[0] * input_shape[1] * input_shape[2] *
input_shape[3]);
std::vector<T> labels(max_distinct_ids);
for (auto& label : labels) {
label = absl::Uniform<T>(gen);
}
for (auto& label : input) {
label = labels[absl::Uniform(gen, 0u, labels.size())];
}
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[4] = {
input_shape[1] * input_shape[2] * input_shape[3] * s,
input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
std::string output;
EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
&output);
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeChannels(output, block_shape, input_shape,
input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
}
void RandomRoundTripBothDataTypes(size_t max_block_size, size_t max_input_size,
size_t max_channels, size_t max_distinct_ids,
size_t num_iterations) {
RandomRoundTrip<uint32_t>(max_block_size, max_input_size, max_channels,
max_distinct_ids, num_iterations);
RandomRoundTrip<uint64_t>(max_block_size, max_input_size, max_channels,
max_distinct_ids, num_iterations);
}
TEST(RoundTripTest, Random) {
RandomRoundTripBothDataTypes(4, 10,
3, 16,
100);
RandomRoundTripBothDataTypes(10, 16,
3, 1000,
100);
}
} |
673 | cpp | google/tensorstore | intrusive_red_black_tree | tensorstore/internal/container/intrusive_red_black_tree.cc | tensorstore/internal/container/intrusive_red_black_tree_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_INTRUSIVE_RED_BLACK_TREE_H_
#define TENSORSTORE_INTERNAL_CONTAINER_INTRUSIVE_RED_BLACK_TREE_H_
#include <array>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
#include "absl/types/compare.h"
#include "tensorstore/internal/tagged_ptr.h"
namespace tensorstore {
namespace internal {
namespace intrusive_red_black_tree {
enum Color : bool { kRed = 0, kBlack = 1 };
enum Direction : bool { kLeft = 0, kRight = 1 };
inline constexpr Direction operator!(Direction d) {
return static_cast<Direction>(!static_cast<bool>(d));
}
template <typename Tag = void>
struct NodeBase;
template <>
struct NodeBase<void> {
NodeBase<>* rbtree_children_[2];
TaggedPtr<NodeBase<>, 1> rbtree_parent_;
};
template <typename Tag>
struct NodeBase : public NodeBase<void> {};
template <typename T, typename Tag = void>
struct LinkedListAccessor {
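  // Reuses the red-black tree node's two child pointers as prev/next links,
  // so the same node type can alternately be stored in a doubly-linked list.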
using Node = T*;
static Node Downcast(NodeBase<>* node) {
return static_cast<Node>(static_cast<NodeBase<Tag>*>(node));
}
static NodeBase<>* Upcast(Node node) {
return static_cast<NodeBase<Tag>*>(node);
}
static void SetPrev(Node node, Node prev) {
Upcast(node)->rbtree_children_[0] = Upcast(prev);
}
static void SetNext(Node node, Node next) {
Upcast(node)->rbtree_children_[1] = Upcast(next);
}
static Node GetPrev(Node node) {
return Downcast(Upcast(node)->rbtree_children_[0]);
}
static Node GetNext(Node node) {
return Downcast(Upcast(node)->rbtree_children_[1]);
}
};
template <typename Node, typename Tag = void>
class Tree;
template <typename Node, typename Tag = void, Direction Dir = kRight>
class Iterator {
public:
using Tree = intrusive_red_black_tree::Tree<Node, Tag>;
using value_type = Node;
using reference = Node&;
using pointer = Node*;
using difference_type = std::ptrdiff_t;
using iterator_category = std::bidirectional_iterator_tag;
Iterator(Node* node = nullptr) : node_(node) {}
explicit operator bool() const { return static_cast<bool>(node_); }
Node* to_pointer() const { return node_; }
Node* operator->() const { return node_; }
Node& operator*() const { return *node_; }
Iterator& operator++() {
assert(node_ != nullptr);
node_ = Tree::Traverse(*node_, Dir);
return *this;
}
Iterator operator++(int) {
auto temp = *this;
++*this;
return temp;
}
Iterator& operator--() {
assert(node_ != nullptr);
node_ = Tree::Traverse(*node_, !Dir);
return *this;
}
Iterator operator--(int) {
auto temp = *this;
--*this;
return temp;
}
friend bool operator==(const Iterator& a, const Iterator& b) {
return a.node_ == b.node_;
}
friend bool operator!=(const Iterator& a, const Iterator& b) {
return a.node_ != b.node_;
}
private:
Node* node_;
};
template <typename Node, typename Tag = void, Direction Dir = kRight>
class Range {
public:
using Tree = intrusive_red_black_tree::Tree<Node, Tag>;
using value_type = Node;
using reference = Node&;
using pointer = Node*;
using difference_type = std::ptrdiff_t;
using iterator = Iterator<Node, Tag, Dir>;
explicit Range(Node* begin, Node* end) : begin_(begin), end_(end) {}
Range(iterator begin, iterator end) : begin_(begin), end_(end) {}
Range(Tree& tree) : begin_(tree.ExtremeNode(!Dir)), end_(nullptr) {}
iterator begin() const { return begin_; }
iterator end() const { return end_; }
bool empty() const { return begin_ == end_; }
friend bool operator==(const Range& a, const Range& b) {
return a.begin_ == b.begin_ && a.end_ == b.end_;
}
friend bool operator!=(const Range& a, const Range& b) { return !(a == b); }
private:
Node* begin_;
Node* end_;
};
template <typename Node>
struct InsertPosition {
Node* adjacent;
Direction direction;
};
template <typename Node>
struct FindResult {
Node* node;
bool found;
Direction insert_direction;
Node* found_node() const { return found ? node : nullptr; }
InsertPosition<Node> insert_position() const {
return {node, insert_direction};
}
};
template <typename Node, typename Tag>
class Tree {
public:
using NodeBase = intrusive_red_black_tree::NodeBase<Tag>;
using iterator = Iterator<Node, Tag>;
using Range = intrusive_red_black_tree::Range<Node, Tag>;
using InsertPosition = intrusive_red_black_tree::InsertPosition<Node>;
using FindResult = intrusive_red_black_tree::FindResult<Node>;
constexpr static Direction kLeft = intrusive_red_black_tree::kLeft;
constexpr static Direction kRight = intrusive_red_black_tree::kRight;
Tree() = default;
Tree(const Tree&) = delete;
Tree(Tree&& other) = default;
Tree& operator=(const Tree&) = delete;
Tree& operator=(Tree&&) = default;
bool empty() { return !root_; }
Node* ExtremeNode(Direction dir);
iterator begin() { return ExtremeNode(kLeft); }
static iterator end() { return {}; }
template <typename Compare>
FindResult Find(Compare compare);
template <Direction BoundDirection, typename Predicate>
FindResult FindBound(Predicate predicate);
void Insert(InsertPosition position, Node& new_node);
void InsertExtreme(Direction dir, Node& new_node);
template <typename Compare, typename MakeNode>
std::pair<Node*, bool> FindOrInsert(Compare compare, MakeNode make_node);
static Tree Join(Tree& a_tree, Node& center, Tree& b_tree,
Direction a_dir = kLeft);
static Tree Join(Tree& a_tree, Tree& b_tree, Direction a_dir = kLeft);
std::array<Tree, 2> Split(Node& center);
struct FindSplitResult {
std::array<Tree, 2> trees;
Node* center;
};
template <typename Compare>
FindSplitResult FindSplit(Compare compare);
void Replace(Node& existing, Node& replacement);
static bool IsDisconnected(Node& node);
void Remove(Node& node);
static Node* Traverse(Node& x, Direction dir);
Node* root() { return Downcast(root_); }
private:
static Node* Downcast(intrusive_red_black_tree::NodeBase<>* node) {
return static_cast<Node*>(static_cast<NodeBase*>(node));
}
static intrusive_red_black_tree::NodeBase<>* Upcast(Node* node) {
return static_cast<NodeBase*>(node);
}
intrusive_red_black_tree::NodeBase<>* root_ = nullptr;
};
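// Example usage (illustrative sketch; `key` and `new_node` stand in for
// caller-provided values):
//
//   struct MyNode : public NodeBase<> {
//     int key;
//   };
//   Tree<MyNode> tree;
//   auto result = tree.Find([&](MyNode& node) -> absl::weak_ordering {
//     return key < node.key   ? absl::weak_ordering::less
//            : key > node.key ? absl::weak_ordering::greater
//                             : absl::weak_ordering::equivalent;
//   });
//   if (!result.found) tree.Insert(result.insert_position(), *new_node);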
namespace ops {
using NodeData = NodeBase<>;
inline TaggedPtr<NodeData, 1> DisconnectedParentValue() { return {}; }
inline NodeData* Parent(NodeData* node) { return node->rbtree_parent_; }
inline Color GetColor(NodeData* node) {
return static_cast<Color>(node->rbtree_parent_.tag());
}
inline bool IsRed(NodeData* node) {
return node && ops::GetColor(node) == kRed;
}
inline NodeData*& Child(NodeData* node, Direction dir) {
return node->rbtree_children_[dir];
}
inline bool IsDisconnected(NodeData* node) {
return node->rbtree_parent_ == ops::DisconnectedParentValue();
}
NodeData* TreeExtremeNode(NodeData* root, Direction dir);
NodeData* Traverse(NodeData* x, Direction dir);
void Insert(NodeData*& root, NodeData* parent, Direction direction,
NodeData* new_node);
NodeData* Join(NodeData* a_tree, NodeData* center, NodeData* b_tree,
Direction a_dir);
NodeData* Join(NodeData* a_tree, NodeData* b_tree, Direction a_dir);
std::array<NodeData*, 2> Split(NodeData* root, NodeData* center);
std::array<NodeData*, 2> Split(NodeData* root, NodeData*& center, Direction dir,
bool found);
void InsertExtreme(NodeData*& root, Direction dir, NodeData* new_node);
void Remove(NodeData*& root, NodeData* z);
void Replace(NodeData*& root, NodeData* existing, NodeData* replacement);
}
template <typename Node, typename Tag>
template <typename Compare>
typename Tree<Node, Tag>::FindResult Tree<Node, Tag>::Find(Compare compare) {
FindResult result;
result.insert_direction = kLeft;
ops::NodeData* node = root_;
ops::NodeData* result_node = nullptr;
while (node) {
result_node = node;
const absl::weak_ordering c = compare(*Downcast(node));
if (c < 0) {
result.insert_direction = kLeft;
} else if (c > 0) {
result.insert_direction = kRight;
} else {
result.found = true;
result.node = Downcast(result_node);
return result;
}
node = ops::Child(node, result.insert_direction);
}
result.found = false;
result.node = Downcast(result_node);
return result;
}
template <typename Node, typename Tag>
template <Direction BoundDirection, typename Predicate>
typename Tree<Node, Tag>::FindResult Tree<Node, Tag>::FindBound(
Predicate predicate) {
FindResult result;
ops::NodeData* found = nullptr;
result.insert_direction = kLeft;
ops::NodeData* node = root_;
ops::NodeData* result_node = nullptr;
while (node) {
result_node = node;
auto satisfies = static_cast<Direction>(predicate(*Downcast(node)));
if (satisfies == BoundDirection) found = node;
result.insert_direction = satisfies;
node = ops::Child(node, satisfies);
}
if (found) {
result.found = true;
result.node = Downcast(found);
result.insert_direction = BoundDirection;
} else {
result.node = Downcast(result_node);
result.found = false;
}
return result;
}
template <typename Node, typename Tag>
void Tree<Node, Tag>::Insert(InsertPosition position, Node& new_node) {
ops::Insert(root_, Upcast(position.adjacent), position.direction,
Upcast(&new_node));
}
template <typename Node, typename Tag>
Tree<Node, Tag> Tree<Node, Tag>::Join(Tree& a_tree, Node& center, Tree& b_tree,
Direction a_dir) {
Tree<Node, Tag> joined;
  joined.root_ = ops::Join(a_tree.root_, &center, b_tree.root_, a_dir);
a_tree.root_ = nullptr;
b_tree.root_ = nullptr;
return joined;
}
template <typename Node, typename Tag>
Tree<Node, Tag> Tree<Node, Tag>::Join(Tree& a_tree, Tree& b_tree,
Direction a_dir) {
Tree<Node, Tag> joined;
joined.root_ = ops::Join(a_tree.root_, b_tree.root_, a_dir);
a_tree.root_ = nullptr;
b_tree.root_ = nullptr;
return joined;
}
template <typename Node, typename Tag>
std::array<Tree<Node, Tag>, 2> Tree<Node, Tag>::Split(Node& center) {
  auto split_nodes = ops::Split(root_, &center);
root_ = nullptr;
std::array<Tree<Node, Tag>, 2> split_trees;
split_trees[0].root_ = split_nodes[0];
split_trees[1].root_ = split_nodes[1];
return split_trees;
}
template <typename Node, typename Tag>
template <typename Compare>
typename Tree<Node, Tag>::FindSplitResult Tree<Node, Tag>::FindSplit(
Compare compare) {
FindSplitResult split_result;
auto find_result = this->Find(std::move(compare));
auto* center = Upcast(find_result.node);
auto split_nodes = ops::Split(root_, center, find_result.insert_direction,
find_result.found);
root_ = nullptr;
split_result.center = Downcast(center);
split_result.trees[0].root_ = split_nodes[0];
split_result.trees[1].root_ = split_nodes[1];
return split_result;
}
template <typename Node, typename Tag>
void Tree<Node, Tag>::InsertExtreme(Direction dir, Node& new_node) {
ops::InsertExtreme(root_, dir, Upcast(&new_node));
}
template <typename Node, typename Tag>
template <typename Compare, typename MakeNode>
std::pair<Node*, bool> Tree<Node, Tag>::FindOrInsert(Compare compare,
MakeNode make_node) {
auto find_result = Find(std::move(compare));
if (find_result.found) return {find_result.node, false};
auto* new_node = make_node();
Insert(find_result.insert_position(), *new_node);
return {new_node, true};
}
template <typename Node, typename Tag>
void Tree<Node, Tag>::Remove(Node& node) {
ops::Remove(root_, Upcast(&node));
}
template <typename Node, typename Tag>
void Tree<Node, Tag>::Replace(Node& existing, Node& replacement) {
ops::Replace(root_, Upcast(&existing), Upcast(&replacement));
}
template <typename Node, typename Tag>
Node* Tree<Node, Tag>::ExtremeNode(Direction dir) {
return Downcast(ops::TreeExtremeNode(root_, dir));
}
template <typename Node, typename Tag>
bool Tree<Node, Tag>::IsDisconnected(Node& node) {
return ops::IsDisconnected(Upcast(&node));
}
template <typename Node, typename Tag>
Node* Tree<Node, Tag>::Traverse(Node& x, Direction dir) {
return Downcast(ops::Traverse(Upcast(&x), dir));
}
}
}
}
#endif
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <utility>
namespace tensorstore {
namespace internal {
namespace intrusive_red_black_tree {
namespace ops {
inline void SetParent(NodeData* node, NodeData* parent) {
node->rbtree_parent_ = {parent, node->rbtree_parent_.tag()};
}
inline void SetColor(NodeData* node, Color color) {
node->rbtree_parent_.set_tag(color);
}
inline Direction ChildDir(NodeData* node) {
return static_cast<Direction>(node != ops::Child(ops::Parent(node), kLeft));
}
inline NodeData* Grandparent(NodeData* node) {
return ops::Parent(ops::Parent(node));
}
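// Rotates the subtree rooted at `x` in direction `dir`: x's child on the
// opposite side takes x's place, and `root` is updated if x was the root.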
void Rotate(NodeData*& root, NodeData* x, Direction dir) {
auto* y = ops::Child(x, !dir);
ops::Child(x, !dir) = ops::Child(y, dir);
if (ops::Child(y, dir)) {
ops::SetParent(ops::Child(y, dir), x);
}
ops::SetParent(y, ops::Parent(x));
if (!ops::Parent(x)) {
root = y;
} else {
ops::Child(ops::Parent(x), ops::ChildDir(x)) = y;
}
ops::Child(y, dir) = x;
ops::SetParent(x, y);
}
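// Restores the red-black invariants after the red node `z` has been linked
// into the tree. Returns true if the root was recolored from red to black,
// i.e. the black height of the tree increased.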
bool InsertFixup(NodeData*& root, NodeData* z) {
assert(ops::IsRed(z));
while (ops::IsRed(ops::Parent(z))) {
Direction dir = ops::ChildDir(ops::Parent(z));
if (NodeData* y = ops::Child(ops::Grandparent(z), !dir); ops::IsRed(y)) {
ops::SetColor(ops::Parent(z), kBlack);
ops::SetColor(y, kBlack);
ops::SetColor(ops::Grandparent(z), kRed);
z = ops::Grandparent(z);
} else {
if (ops::ChildDir(z) == !dir) {
z = ops::Parent(z);
ops::Rotate(root, z, dir);
}
ops::SetColor(ops::Parent(z), kBlack);
ops::SetColor(ops::Grandparent(z), kRed);
ops::Rotate(root, ops::Grandparent(z), !dir);
assert(!ops::IsRed(ops::Parent(z)));
break;
}
}
const Color existing_color = ops::GetColor(root);
ops::SetColor(root, kBlack);
return existing_color == kRed;
}
struct TreeWithBlackHeight {
NodeData* root = nullptr;
size_t black_height = 0;
};
size_t BlackHeight(NodeData* node) {
size_t black_height = 0;
while (node) {
if (ops::GetColor(node) == kBlack) ++black_height;
node = ops::Child(node, kLeft);
}
return black_height;
}
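// Joins `a_tree`, `center`, and `b_tree` (ordered along `a_dir`): descends
// the `!a_dir` spine of the taller tree until reaching a black node whose
// black height matches the shorter tree, which is where `center` is grafted.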
TreeWithBlackHeight Join(TreeWithBlackHeight a_tree, NodeData* center,
TreeWithBlackHeight b_tree, Direction a_dir) {
assert(a_tree.black_height == ops::BlackHeight(a_tree.root));
assert(b_tree.black_height == ops::BlackHeight(b_tree.root));
if (a_tree.black_height < b_tree.black_height) {
a_dir = !a_dir;
std::swap(a_tree, b_tree);
}
size_t difference = a_tree.black_height - b_tree.black_height;
NodeData* a_graft = a_tree.root;
NodeData* a_graft_parent = nullptr;
while (true) {
if (!ops::IsRed(a_graft)) {
if (difference == 0) break;
--difference;
}
a_graft_parent = a_graft;
a_graft = ops::Child(a_graft, !a_dir);
}
assert(!ops::IsRed(a_graft)); | #include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
namespace {
namespace rbtree = tensorstore::internal::intrusive_red_black_tree;
namespace ops = tensorstore::internal::intrusive_red_black_tree::ops;
int CheckInvariants(ops::NodeData* x) {
if (!x) return 1;
ops::NodeData* c1 = ops::Child(x, rbtree::kLeft);
ops::NodeData* c2 = ops::Child(x, rbtree::kRight);
if (c1) {
EXPECT_EQ(x, ops::Parent(c1));
}
if (c2) {
EXPECT_EQ(x, ops::Parent(c2));
}
if (ops::GetColor(x) == rbtree::kRed) {
EXPECT_FALSE(ops::IsRed(c1));
EXPECT_FALSE(ops::IsRed(c2));
}
int lh = CheckInvariants(c1);
int rh = CheckInvariants(c2);
EXPECT_EQ(lh, rh);
if (ops::GetColor(x) == rbtree::kRed) {
return lh;
} else {
return lh + 1;
}
}
template <typename Node, typename Tag, typename Compare>
void CheckInvariants(rbtree::Tree<Node, Tag>& x, Compare compare) {
auto* root = static_cast<rbtree::NodeBase<Tag>*>(x.root());
if (!root) return;
EXPECT_EQ(rbtree::kBlack, ops::GetColor(root));
CheckInvariants(root);
EXPECT_TRUE(std::is_sorted(
x.begin(), x.end(), [&](Node& a, Node& b) { return compare(a, b) < 0; }));
}
struct Set {
struct Node : public rbtree::NodeBase<> {
int value;
};
static void FormatNode(std::string& out, const std::string& prefix,
Node* node, bool dir) {
out += prefix;
out += (dir == rbtree::kLeft) ? "|- " : " - ";
if (!node) {
out += "null";
} else {
out += std::to_string(node->value);
out += ops::GetColor(node) == rbtree::kBlack ? "(blk)" : "(red)";
}
out += '\n';
if (!node) return;
std::string child_prefix =
prefix + ((dir == rbtree::kLeft) ? "| " : " ");
for (int dir = 0; dir < 2; ++dir) {
FormatNode(out, child_prefix,
static_cast<Node*>(
ops::Child(node, static_cast<rbtree::Direction>(dir))),
static_cast<rbtree::Direction>(dir));
}
}
static std::string FormatTree(rbtree::Tree<Node>& tree) {
std::string out;
FormatNode(out, "", tree.root(), rbtree::kRight);
return out;
}
static auto CompareToKey(int key) {
return [key](Node& node) -> absl::weak_ordering {
return tensorstore::internal::DoThreeWayComparison(std::less<int>{}, key,
node.value);
};
}
static auto CompareNodes() {
return [](Node& a, Node& b) -> absl::weak_ordering {
return tensorstore::internal::CompareResultAsWeakOrdering(a.value -
b.value);
};
}
static std::vector<int> Elements(rbtree::Tree<Node>& tree) {
std::vector<int> elements;
for (auto& node : tree) {
elements.push_back(node.value);
}
return elements;
}
using Tree = rbtree::Tree<Node>;
Tree tree;
std::set<int> golden_set;
void CheckTreeInvariants() {
SCOPED_TRACE("\n" + FormatTree(tree));
CheckInvariants(tree, CompareNodes());
}
bool Contains(int key) {
bool result = tree.Find(CompareToKey(key)).found;
EXPECT_EQ(result, golden_set.count(key) == 1);
return result;
}
Node* FindNode(int key) {
auto* node = tree.Find(CompareToKey(key)).found_node();
assert(node);
return node;
}
bool Insert(int key) {
auto [node, inserted] = tree.FindOrInsert(CompareToKey(key), [&] {
auto* n = new Node;
n->value = key;
return n;
});
EXPECT_EQ(key, node->value);
CheckTreeInvariants();
EXPECT_EQ(inserted, golden_set.insert(key).second);
return inserted;
}
bool Erase(int key) {
auto node = tree.Find(CompareToKey(key)).found_node();
bool result;
if (!node) {
result = false;
} else {
tree.Remove(*node);
delete node;
CheckTreeInvariants();
result = true;
}
EXPECT_EQ(static_cast<int>(result), golden_set.erase(key));
return result;
}
void CheckElements() {
EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
golden_set.end()));
}
void CheckSplitJoin(int key) {
auto orig_elements = Elements();
auto split_result = tree.FindSplit([&](Node& node) -> absl::weak_ordering {
return tensorstore::internal::DoThreeWayComparison(std::less<>{}, key,
node.value);
});
SCOPED_TRACE("Key=" + std::to_string(key) +
"\nLeft tree:\n" + FormatTree(split_result.trees[0]) +
"\nRight tree:\n" + FormatTree(split_result.trees[1]));
for (int i = 0; i < 2; ++i) {
CheckInvariants(split_result.trees[i], CompareNodes());
}
std::vector<int> elements_a = Elements(split_result.trees[0]);
std::vector<int> elements_b = Elements(split_result.trees[1]);
std::vector<int> combined_elements = elements_a;
if (split_result.center) {
EXPECT_EQ(key, split_result.center->value);
combined_elements.push_back(split_result.center->value);
}
combined_elements.insert(combined_elements.end(), elements_b.begin(),
elements_b.end());
EXPECT_THAT(combined_elements, ::testing::ElementsAreArray(orig_elements));
if (split_result.center) {
tree = Tree::Join(split_result.trees[0], *split_result.center,
split_result.trees[1]);
} else {
tree = Tree::Join(split_result.trees[0], split_result.trees[1]);
}
CheckTreeInvariants();
CheckElements();
}
void CheckSplitJoin() {
auto orig_elements = Elements();
if (orig_elements.empty()) {
CheckSplitJoin(0);
} else {
int min = orig_elements.front() - 1;
int max = orig_elements.back() + 1;
for (int x = min; x <= max; ++x) {
SCOPED_TRACE(x);
CheckSplitJoin(x);
}
}
}
std::vector<int> Elements() { return Elements(tree); }
~Set() {
for (auto it = tree.begin(); it != tree.end();) {
auto next = std::next(it);
tree.Remove(*it);
delete &*it;
it = next;
}
}
};
TEST(SetTest, SimpleInsert1) {
Set rbtree_set;
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(1);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(2);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(3);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
}
TEST(SetTest, SimpleInsert2) {
Set rbtree_set;
Set::Tree::Range empty_range = rbtree_set.tree;
EXPECT_TRUE(empty_range.empty());
EXPECT_EQ(empty_range, empty_range);
rbtree_set.Insert(5);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(8);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(1);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(3);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(9);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(7);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(0);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
Set::Tree::Range full_range = rbtree_set.tree;
EXPECT_FALSE(full_range.empty());
EXPECT_EQ(full_range, full_range);
EXPECT_NE(full_range, empty_range);
EXPECT_EQ(full_range.begin(), rbtree_set.tree.begin());
EXPECT_EQ(full_range.end(), rbtree_set.tree.end());
Set::Tree::Range partial_range(rbtree_set.FindNode(1),
rbtree_set.FindNode(5));
EXPECT_NE(partial_range, full_range);
EXPECT_NE(partial_range, empty_range);
std::set<int> partial_elements;
for (auto& node : partial_range) {
partial_elements.insert(node.value);
}
EXPECT_THAT(partial_elements, ::testing::ElementsAre(1, 3));
}
TEST(SetTest, RandomInsert) {
Set rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
for (int i = 0; i < 20; ++i) {
const int key = absl::Uniform(gen, 0, kMaxKey);
rbtree_set.Contains(key);
rbtree_set.Insert(key);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
}
}
TEST(SetTest, RandomInsertRemove) {
Set rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
for (int i = 0; i < 50; ++i) {
const int key = absl::Uniform(gen, 0, kMaxKey);
if (absl::Bernoulli(gen, 0.5)) {
rbtree_set.Insert(key);
} else {
rbtree_set.Erase(key);
}
}
}
struct MultiSet {
using Pair = std::pair<int, int>;
struct Node : public rbtree::NodeBase<> {
Pair value;
};
struct Compare {
bool operator()(const Pair& a, const Pair& b) const {
return a.first < b.first;
}
};
using Tree = rbtree::Tree<Node>;
Tree tree;
std::multiset<Pair, Compare> golden_set;
constexpr static auto ThreeWayCompare = [](Node& a, Node& b) {
return tensorstore::internal::CompareResultAsWeakOrdering(a.value.first -
b.value.first);
};
void CheckTreeInvariants() { CheckInvariants(tree, ThreeWayCompare); }
void Insert(Pair value) {
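    // The comparator never reports equivalence, so nodes with equal keys are
    // always inserted (multiset semantics); a new equal key lands to the
    // right of existing ones, preserving insertion order.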
tree.FindOrInsert(
[&](Node& node) {
return value.first < node.value.first ? absl::weak_ordering::less
: absl::weak_ordering::greater;
},
[&] {
auto* n = new Node;
n->value = value;
return n;
});
CheckTreeInvariants();
golden_set.insert(value);
}
void CheckElements() {
EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
golden_set.end()));
}
std::vector<Pair> Elements() {
std::vector<Pair> elements;
for (auto& node : tree) {
elements.push_back(node.value);
}
return elements;
}
~MultiSet() {
for (auto it = tree.begin(); it != tree.end();) {
auto next = std::next(it);
tree.Remove(*it);
delete &*it;
it = next;
}
}
};
TEST(MultiSetTest, SimpleInsert1) {
MultiSet rbtree_set;
rbtree_set.Insert({1, 2});
rbtree_set.CheckElements();
rbtree_set.Insert({2, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({1, 1});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 1});
rbtree_set.CheckElements();
EXPECT_THAT(
rbtree_set.Elements(),
::testing::ElementsAre(::testing::Pair(1, 2), ::testing::Pair(1, 1),
::testing::Pair(2, 0), ::testing::Pair(3, 0),
::testing::Pair(3, 1)));
}
TEST(MultiSetTest, SimpleInsert2) {
MultiSet rbtree_set;
rbtree_set.Insert({5, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({8, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({1, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({9, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({7, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({0, 0});
rbtree_set.CheckElements();
}
TEST(MultiSetTest, RandomInsert) {
MultiSet rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
constexpr int kMaxValue = 100;
for (int i = 0; i < 20; ++i) {
rbtree_set.Insert(
{absl::Uniform(gen, 0, kMaxKey), absl::Uniform(gen, 0, kMaxValue)});
rbtree_set.CheckElements();
}
}
} |
674 | cpp | google/tensorstore | sha256 | tensorstore/internal/digest/sha256.cc | tensorstore/internal/digest/sha256_test.cc | #ifndef TENSORSTORE_INTERNAL_DIGEST_SHA256_H_
#define TENSORSTORE_INTERNAL_DIGEST_SHA256_H_
#include <stdint.h>
#include <array>
#include <string_view>
#include "absl/strings/cord.h"
#include <openssl/sha.h>
namespace tensorstore {
namespace internal {
class SHA256Digester {
public:
SHA256Digester() { SHA256_Init(&ctx_); }
void Write(std::string_view src) {
SHA256_Update(&ctx_, src.data(), src.size());
}
void Write(const absl::Cord& cord);
using DigestType = std::array<uint8_t, SHA256_DIGEST_LENGTH>;
DigestType Digest() {
DigestType digest;
SHA256_Final(digest.data(), &ctx_);
return digest;
}
private:
SHA256_CTX ctx_;
};
}
}
#endif
#include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include "absl/strings/cord.h"
namespace tensorstore {
namespace internal {
void SHA256Digester::Write(const absl::Cord& cord) {
for (std::string_view chunk : cord.Chunks()) {
Write(chunk);
}
}
}
} | #include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
using ::tensorstore::internal::SHA256Digester;
namespace {
TEST(Sha256Digest, Basic) {
auto digest = [](auto input) {
SHA256Digester digester;
digester.Write(input);
auto digest = digester.Digest();
return absl::BytesToHexString(std::string_view(
reinterpret_cast<char*>(digest.data()), digest.size()));
};
EXPECT_THAT(
digest(std::string_view("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
EXPECT_THAT(
digest(absl::Cord("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
}
} |
675 | cpp | google/tensorstore | client_credentials | tensorstore/internal/grpc/client_credentials.cc | tensorstore/internal/grpc/client_credentials_test.cc | #ifndef TENSORSTORE_INTERNAL_GRPC_CLIENT_CREDENTIALS_H_
#define TENSORSTORE_INTERNAL_GRPC_CLIENT_CREDENTIALS_H_
#include <memory>
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
struct GrpcClientCredentials final
: public internal::ContextResourceTraits<GrpcClientCredentials> {
static constexpr char id[] = "grpc_client_credentials";
struct Spec {};
struct Resource {
std::shared_ptr<::grpc::ChannelCredentials> GetCredentials();
private:
friend struct GrpcClientCredentials;
std::shared_ptr<::grpc::ChannelCredentials> credentials_;
};
static constexpr Spec Default() { return {}; }
static constexpr auto JsonBinder() { return internal_json_binding::Object(); }
static Result<Resource> Create(
const Spec& spec, internal::ContextResourceCreationContext context) {
return Resource{};
}
static Spec GetSpec(const Resource& resource,
const internal::ContextSpecBuilder& builder) {
return Spec{};
}
static bool Use(tensorstore::Context context,
std::shared_ptr<::grpc::ChannelCredentials> credentials);
};
}
#endif
#include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
namespace tensorstore {
namespace {
ABSL_CONST_INIT static absl::Mutex credentials_mu(absl::kConstInit);
const internal::ContextResourceRegistration<GrpcClientCredentials>
grpc_client_credentials_registration;
}
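// Installs `credentials` on the context's credentials resource. Returns true
// if no credentials had previously been set on that resource.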
bool GrpcClientCredentials::Use(
tensorstore::Context context,
std::shared_ptr<::grpc::ChannelCredentials> credentials) {
auto resource = context.GetResource<GrpcClientCredentials>().value();
absl::MutexLock l(&credentials_mu);
bool result = (resource->credentials_ == nullptr);
resource->credentials_ = std::move(credentials);
return result;
}
std::shared_ptr<::grpc::ChannelCredentials>
GrpcClientCredentials::Resource::GetCredentials() {
absl::MutexLock l(&credentials_mu);
if (credentials_) return credentials_;
return grpc::InsecureChannelCredentials();
}
} | #include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <gtest/gtest.h>
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::GrpcClientCredentials;
TEST(GrpcClientCredentials, Use) {
auto use = grpc::experimental::LocalCredentials(LOCAL_TCP);
auto ctx = tensorstore::Context::Default();
EXPECT_TRUE(GrpcClientCredentials::Use(ctx, use));
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_EQ(a.get(), use.get());
}
TEST(GrpcClientCredentials, Default) {
auto ctx = tensorstore::Context::Default();
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
auto b = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_NE(a.get(), b.get());
}
} |
676 | cpp | google/tensorstore | server_credentials | tensorstore/internal/grpc/server_credentials.cc | tensorstore/internal/grpc/server_credentials_test.cc | #ifndef TENSORSTORE_INTERNAL_GRPC_SERVER_CREDENTIALS_H_
#define TENSORSTORE_INTERNAL_GRPC_SERVER_CREDENTIALS_H_
#include <memory>
#include "grpcpp/security/server_credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
struct GrpcServerCredentials final
: public internal::ContextResourceTraits<GrpcServerCredentials> {
static constexpr char id[] = "grpc_server_credentials";
struct Spec {};
struct Resource {
std::shared_ptr<::grpc::ServerCredentials> GetCredentials();
private:
friend struct GrpcServerCredentials;
std::shared_ptr<::grpc::ServerCredentials> credentials_;
};
static constexpr Spec Default() { return {}; }
static constexpr auto JsonBinder() { return internal_json_binding::Object(); }
static Result<Resource> Create(
const Spec& spec, internal::ContextResourceCreationContext context) {
return Resource{};
}
static Spec GetSpec(const Resource& resource,
const internal::ContextSpecBuilder& builder) {
return Spec{};
}
static bool Use(tensorstore::Context context,
std::shared_ptr<::grpc::ServerCredentials> credentials);
};
}
#endif
#include "tensorstore/internal/grpc/server_credentials.h"
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace {
ABSL_CONST_INIT static absl::Mutex credentials_mu(absl::kConstInit);
const internal::ContextResourceRegistration<GrpcServerCredentials>
grpc_server_credentials_registration;
}
bool GrpcServerCredentials::Use(
tensorstore::Context context,
std::shared_ptr<::grpc::ServerCredentials> credentials) {
auto resource = context.GetResource<GrpcServerCredentials>().value();
absl::MutexLock l(&credentials_mu);
bool result = (resource->credentials_ == nullptr);
resource->credentials_ = std::move(credentials);
return result;
}
std::shared_ptr<::grpc::ServerCredentials>
GrpcServerCredentials::Resource::GetCredentials() {
absl::MutexLock l(&credentials_mu);
if (credentials_) return credentials_;
return grpc::InsecureServerCredentials();
}
} | #include "tensorstore/internal/grpc/server_credentials.h"
#include <gtest/gtest.h>
#include "grpcpp/security/server_credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::GrpcServerCredentials;
TEST(GrpcServerCredentials, Use) {
auto use = grpc::experimental::LocalServerCredentials(LOCAL_TCP);
auto ctx = tensorstore::Context::Default();
EXPECT_TRUE(GrpcServerCredentials::Use(ctx, use));
auto a = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
EXPECT_EQ(a.get(), use.get());
}
TEST(GrpcServerCredentials, Default) {
auto ctx = tensorstore::Context::Default();
auto a = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
auto b = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
EXPECT_NE(a.get(), b.get());
}
} |
677 | cpp | google/tensorstore | utils | tensorstore/internal/grpc/utils.cc | tensorstore/internal/grpc/utils_test.cc | #ifndef TENSORSTORE_INTERNAL_GRPC_UTILS_H_
#define TENSORSTORE_INTERNAL_GRPC_UTILS_H_
#include <grpcpp/support/status.h>
#include "absl/status/status.h"
#include "tensorstore/internal/source_location.h"
namespace tensorstore {
namespace internal {
absl::Status GrpcStatusToAbslStatus(
grpc::Status s, SourceLocation loc = SourceLocation::current());
grpc::Status AbslStatusToGrpcStatus(const absl::Status& status);
}
}
#endif
#include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#define TENSORSTORE_STATUS_ASSERT(x, y) \
static_assert(static_cast<int>(grpc::StatusCode::x) == \
static_cast<int>(absl::StatusCode::y))
TENSORSTORE_STATUS_ASSERT(CANCELLED, kCancelled);
TENSORSTORE_STATUS_ASSERT(UNKNOWN, kUnknown);
TENSORSTORE_STATUS_ASSERT(INVALID_ARGUMENT, kInvalidArgument);
TENSORSTORE_STATUS_ASSERT(DEADLINE_EXCEEDED, kDeadlineExceeded);
TENSORSTORE_STATUS_ASSERT(NOT_FOUND, kNotFound);
TENSORSTORE_STATUS_ASSERT(ALREADY_EXISTS, kAlreadyExists);
TENSORSTORE_STATUS_ASSERT(PERMISSION_DENIED, kPermissionDenied);
TENSORSTORE_STATUS_ASSERT(RESOURCE_EXHAUSTED, kResourceExhausted);
TENSORSTORE_STATUS_ASSERT(FAILED_PRECONDITION, kFailedPrecondition);
TENSORSTORE_STATUS_ASSERT(ABORTED, kAborted);
TENSORSTORE_STATUS_ASSERT(OUT_OF_RANGE, kOutOfRange);
TENSORSTORE_STATUS_ASSERT(UNIMPLEMENTED, kUnimplemented);
TENSORSTORE_STATUS_ASSERT(INTERNAL, kInternal);
TENSORSTORE_STATUS_ASSERT(UNAVAILABLE, kUnavailable);
TENSORSTORE_STATUS_ASSERT(DATA_LOSS, kDataLoss);
TENSORSTORE_STATUS_ASSERT(UNAUTHENTICATED, kUnauthenticated);
#undef TENSORSTORE_STATUS_ASSERT
namespace tensorstore {
namespace internal {
absl::Status GrpcStatusToAbslStatus(grpc::Status s, SourceLocation loc) {
if (s.ok()) return absl::OkStatus();
auto absl_code = static_cast<absl::StatusCode>(s.error_code());
absl::Status status(absl_code, s.error_message());
MaybeAddSourceLocation(status, loc);
if (!s.error_details().empty()) {
status.SetPayload("grpc.Status.details", absl::Cord(s.error_details()));
}
return status;
}
grpc::Status AbslStatusToGrpcStatus(const absl::Status& status) {
if (status.ok()) return grpc::Status::OK;
auto grpc_code = static_cast<grpc::StatusCode>(status.code());
return grpc::Status(grpc_code, std::string(status.message()));
}
}
} | #include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
namespace {
using ::tensorstore::internal::AbslStatusToGrpcStatus;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
TEST(StatusToGrpcStatus, Basic) {
EXPECT_EQ(grpc::Status::OK.error_code(),
AbslStatusToGrpcStatus(absl::OkStatus()).error_code());
}
TEST(GrpcStatusToStatus, Basic) {
EXPECT_EQ(absl::OkStatus(), GrpcStatusToAbslStatus(grpc::Status::OK));
}
} |
678 | cpp | google/tensorstore | find | tensorstore/internal/riegeli/find.cc | tensorstore/internal/riegeli/find_test.cc | #ifndef TENSORSTORE_INTERNAL_RIEGELI_FIND_H_
#define TENSORSTORE_INTERNAL_RIEGELI_FIND_H_
#include <string_view>
#include "riegeli/bytes/reader.h"
namespace tensorstore {
namespace internal {
bool StartsWith(riegeli::Reader &reader, std::string_view needle);
bool FindFirst(riegeli::Reader &reader, std::string_view needle);
bool FindLast(riegeli::Reader &reader, std::string_view needle);
}
}
#endif
#include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstring>
#include <optional>
#include <string_view>
#include "riegeli/bytes/reader.h"
namespace tensorstore {
namespace internal {
bool StartsWith(riegeli::Reader &reader, std::string_view needle) {
return reader.ok() &&
reader.Pull(needle.size()) &&
memcmp(reader.cursor(), needle.data(), needle.size()) == 0;
}
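// Scans forward for `needle` using std::search over the currently buffered
// data, pulling more bytes as needed; on success the cursor is left at the
// start of the first occurrence.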
bool FindFirst(riegeli::Reader &reader, std::string_view needle) {
while (true) {
if (!reader.Pull(needle.size())) break;
auto end = reader.cursor() + reader.available();
auto pos = std::search(reader.cursor(), end, needle.begin(), needle.end());
if (pos != end) {
reader.move_cursor(pos - reader.cursor());
return true;
}
reader.move_cursor(1 + reader.available() - needle.size());
}
return false;
}
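// When the reader knows its total size, the whole input is pulled and
// searched with rfind; otherwise the input is scanned forward, remembering
// the position of the last match, and the reader is repositioned there.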
bool FindLast(riegeli::Reader &reader, std::string_view needle) {
if (reader.SupportsSize()) {
auto size = reader.Size();
if (size && reader.Pull(*size)) {
auto found_pos = std::string_view(reader.cursor(), *size).rfind(needle);
if (found_pos == std::string_view::npos) return false;
return reader.Seek(found_pos + reader.pos());
}
}
std::optional<uint64_t> found;
while (reader.ok()) {
for (size_t available = reader.available(); available > needle.size();
available = reader.available()) {
if (memcmp(reader.cursor(), needle.data(), needle.size()) == 0) {
found = reader.pos();
}
const char *pos = static_cast<const char *>(
memchr(reader.cursor() + 1, needle[0], available - 1));
if (pos == nullptr) {
reader.move_cursor(available);
break;
}
reader.move_cursor(pos - reader.cursor());
}
if (!reader.Pull(needle.size() - reader.available())) break;
}
return found.has_value() && reader.Seek(*found);
}
}
} | #include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "riegeli/bytes/string_reader.h"
namespace {
using ::tensorstore::internal::FindFirst;
using ::tensorstore::internal::FindLast;
using ::tensorstore::internal::StartsWith;
static constexpr unsigned char kData[] = {
0x17, 0x16, 0xa1, 0xcb, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
0x03, 0x04, 0xbb, 0xcc, 0xc7, 0xb6, 0xbe, 0x5d, 0x7c, 0x2d, 0x23, 0x44,
0xa0, 0xbe, 0x13, 0x1b, 0x9a, 0x2d, 0xf2, 0x13, 0x6a, 0xfb, 0xad, 0xdb,
0x73, 0xf9, 0x3d, 0xbc, 0x5d, 0x7c, 0x6f, 0x41, 0xc0, 0xad, 0xf3, 0x31,
0x79, 0x7f, 0x89, 0xb2, 0xe4, 0xa9, 0xf5, 0x9d, 0xc0, 0x30, 0x23, 0x32,
0x99, 0x2c, 0x16, 0x42, 0xf5, 0x48, 0xd1, 0x79, 0xdb, 0x98, 0xb9, 0xc3,
0x6c, 0xa6, 0x50, 0xcd, 0x86, 0xb6, 0xd3, 0xa7, 0x57, 0x3b, 0xe6, 0x1d,
0xa5, 0xe2, 0x79, 0xe9, 0x2d, 0x19, 0xec, 0xa6, 0xf3, 0xa3, 0x50, 0x65,
0x03, 0x04, 0xbb, 0xcc, 0x1a, 0xc9, 0xec, 0xb2, 0xa6, 0x3e, 0xe0, 0x49,
0x6a, 0x30, 0xd7, 0x1f, 0x90, 0x08, 0x1c, 0x2a, 0x6b, 0xbd, 0x06, 0x9c,
0xef, 0xd2, 0x79, 0x20, 0x64, 0xbc, 0xb7, 0x75, 0xbb, 0xcd, 0xcc, 0xa8,
0x49, 0x8b, 0x30, 0x4f, 0x73, 0x7c, 0xb5, 0x6e, 0x08, 0x1b, 0xc2, 0x7f,
0xfb, 0xb1, 0xc4, 0x49, 0x89, 0x74, 0xe7, 0x8e, 0x9d, 0x6f, 0x44, 0x14,
0xbd, 0xdc, 0x6a, 0xd9, 0xcb, 0x53, 0x2b, 0xdc, 0x48, 0x6c, 0xa3, 0x14,
0x4e, 0xc0, 0x3b, 0x6b, 0x47, 0x50, 0xd5, 0x97, 0x84, 0x30, 0xd5, 0x28,
0x03, 0x04, 0xbb, 0xcc, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
};
constexpr const unsigned char kLiteral1[4] = {0x03, 0x04, 0xbb, 0xcc};
constexpr const unsigned char kLiteral2[3] = {0xff, 0xfe, 0xff};
TEST(FindTest, FindFirst) {
const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
sizeof(kLiteral1));
const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
sizeof(kLiteral2));
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
sizeof(kData));
size_t positions[3] = {0, 0, 0};
for (int i = 0; i < 3; ++i) {
EXPECT_TRUE(FindFirst(string_reader, literal1));
EXPECT_TRUE(StartsWith(string_reader, literal1));
positions[i] = string_reader.pos();
string_reader.Skip(sizeof(kLiteral1));
}
EXPECT_FALSE(FindFirst(string_reader, literal1));
EXPECT_THAT(positions, ::testing::ElementsAre(12, 96, 180));
string_reader.Seek(0);
EXPECT_TRUE(FindFirst(string_reader, literal2));
EXPECT_THAT(string_reader.pos(), 9);
}
TEST(FindTest, FindLast) {
const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
sizeof(kLiteral1));
const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
sizeof(kLiteral2));
riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
sizeof(kData));
EXPECT_TRUE(FindLast(string_reader, literal1));
EXPECT_TRUE(StartsWith(string_reader, literal1));
EXPECT_THAT(string_reader.pos(), 180);
string_reader.Seek(0);
EXPECT_TRUE(FindLast(string_reader, literal2));
EXPECT_THAT(string_reader.pos(), 189);
}
} |
679 | cpp | google/tensorstore | array_endian_codec | tensorstore/internal/riegeli/array_endian_codec.cc | tensorstore/internal/riegeli/array_endian_codec_test.cc | #ifndef TENSORSTORE_INTERNAL_RIEGELI_ARRAY_ENDIAN_CODEC_H_
#define TENSORSTORE_INTERNAL_RIEGELI_ARRAY_ENDIAN_CODEC_H_
#include <stddef.h>
#include <memory>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
inline absl::Cord MakeCordFromSharedPtr(std::shared_ptr<const void> ptr,
size_t size) {
std::string_view s(static_cast<const char*>(ptr.get()), size);
return absl::MakeCordFromExternal(
s, [ptr = std::move(ptr)](std::string_view s) mutable { ptr.reset(); });
}
[[nodiscard]] bool EncodeArrayEndian(SharedArrayView<const void> decoded,
endian encoded_endian,
ContiguousLayoutOrder order,
riegeli::Writer& writer);
Result<SharedArray<const void>> DecodeArrayEndian(
riegeli::Reader& reader, DataType dtype, span<const Index> decoded_shape,
endian encoded_endian, ContiguousLayoutOrder order);
absl::Status DecodeArrayEndian(riegeli::Reader& reader, endian encoded_endian,
ContiguousLayoutOrder order,
ArrayView<void> decoded);
}
}
#endif
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/copy_all.h"
#include "riegeli/bytes/limiting_reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
auto& contiguous_bytes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/internal/riegeli/contiguous_bytes", "");
auto& noncontiguous_bytes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/internal/riegeli/noncontiguous_bytes", "");
}
[[nodiscard]] bool EncodeArrayEndian(SharedArrayView<const void> decoded,
endian encoded_endian,
ContiguousLayoutOrder order,
riegeli::Writer& writer) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
assert(functions.copy != nullptr);
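  // Fast path: if no byte swapping is needed and the array is already
  // contiguous in `order`, write the bytes directly, wrapping the shared
  // pointer in a Cord when the writer does not prefer copying.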
if ((encoded_endian == endian::native ||
functions.swap_endian_inplace == nullptr) &&
IsContiguousLayout(decoded, order)) {
const size_t length = decoded.num_elements() * decoded.dtype().size();
if (writer.PrefersCopying()) {
return writer.Write(std::string_view(
reinterpret_cast<const char*>(decoded.data()), length));
}
return writer.Write(
internal::MakeCordFromSharedPtr(std::move(decoded.pointer()), length));
}
const internal::ElementwiseFunction<1, void*>* write_func =
encoded_endian == endian::native ? &functions.write_native_endian
: &functions.write_swapped_endian;
return internal::IterateOverArrays(
{write_func, &writer},
nullptr, {order, include_repeated_elements}, decoded);
}
namespace {
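// Writer used by the zero-copy decode path: it accepts exactly one
// contiguous, suitably aligned Chain or Cord buffer and retains a shared
// reference to it; any other write pattern fails with kUnimplemented.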
class ContiguousBufferSinkWriter : public riegeli::Writer {
public:
std::shared_ptr<const void> data;
size_t expected_length;
size_t expected_alignment;
void DoFail() { Fail(absl::UnimplementedError("")); }
bool PushSlow(size_t min_length, size_t recommended_length) override {
DoFail();
return false;
}
bool ValidateContiguousBuffer(std::string_view buf) {
if (buf.size() != expected_length ||
(reinterpret_cast<uintptr_t>(buf.data()) % expected_alignment) != 0) {
DoFail();
return false;
}
return true;
}
template <typename T>
bool WriteCordLike(T&& src) {
if (this->data) {
DoFail();
return false;
}
auto buf = src.TryFlat();
if (!buf) {
DoFail();
return false;
}
if (!ValidateContiguousBuffer(*buf)) return false;
auto data =
std::make_shared<internal::remove_cvref_t<T>>(std::forward<T>(src));
buf = data->TryFlat();
if (!buf) {
DoFail();
return false;
}
if (!ValidateContiguousBuffer(*buf)) return false;
this->data = std::shared_ptr<const void>(std::move(data), buf->data());
return true;
}
bool WriteSlow(const riegeli::Chain& src) override {
return WriteCordLike(src);
}
bool WriteSlow(const absl::Cord& src) override { return WriteCordLike(src); }
};
}
Result<SharedArray<const void>> DecodeArrayEndian(
riegeli::Reader& reader, DataType dtype, span<const Index> decoded_shape,
endian encoded_endian, ContiguousLayoutOrder order) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
size_t expected_length = dtype.size() * ProductOfExtents(decoded_shape);
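  // The zero-copy path is attempted only when no byte swapping is required,
  // the reader supports Size() and rewinding, and the bytes remaining are
  // exactly `expected_length`.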
const auto may_be_contiguous = [&] {
if (encoded_endian != endian::native &&
functions.swap_endian_inplace != nullptr) {
return false;
}
if (!reader.SupportsRewind()) {
return false;
}
if (!reader.SupportsSize()) {
return false;
}
auto size_opt = reader.Size();
if (!size_opt) return false;
if (*size_opt < expected_length ||
*size_opt - expected_length != reader.pos()) {
return false;
}
return true;
};
if (may_be_contiguous()) {
auto pos = reader.pos();
ContiguousBufferSinkWriter buffer_sink_writer;
buffer_sink_writer.expected_length = expected_length;
buffer_sink_writer.expected_alignment = dtype->alignment;
if (riegeli::CopyAll(reader, buffer_sink_writer, expected_length).ok()) {
absl::Status status;
if (functions.validate) {
if (!(*functions.validate)[IterationBufferKind::kContiguous](
nullptr, {1, static_cast<Index>(expected_length)},
IterationBufferPointer(
const_cast<void*>(buffer_sink_writer.data.get()), 0,
dtype.size()),
&status)) {
return status;
}
}
contiguous_bytes.IncrementBy(expected_length);
return tensorstore::SharedArray<const void>(
SharedElementPointer<const void>(std::move(buffer_sink_writer.data),
dtype),
decoded_shape, order);
}
if (!reader.Seek(pos)) {
return reader.status();
}
}
auto decoded =
tensorstore::AllocateArray(decoded_shape, order, default_init, dtype);
TENSORSTORE_RETURN_IF_ERROR(
DecodeArrayEndian(reader, encoded_endian, order, decoded));
reader.VerifyEnd();
if (!reader.ok()) {
return reader.status();
}
noncontiguous_bytes.IncrementBy(expected_length);
return decoded;
}
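// Decodes from `reader` directly into the preallocated `decoded` array,
// byte-swapping as required. A LimitingReader enforces that exactly
// dtype.size() * num_elements bytes are consumed.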
absl::Status DecodeArrayEndian(riegeli::Reader& reader, endian encoded_endian,
ContiguousLayoutOrder order,
ArrayView<void> decoded) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
assert(functions.copy != nullptr);
riegeli::LimitingReader limiting_reader(
&reader, riegeli::LimitingReaderBase::Options().set_exact_length(
decoded.dtype().size() * decoded.num_elements()));
[[maybe_unused]] const auto unused_result = internal::IterateOverArrays(
{encoded_endian == endian::native ? &functions.read_native_endian
: &functions.read_swapped_endian,
&limiting_reader},
nullptr, {order, include_repeated_elements}, decoded);
if (!limiting_reader.VerifyEndAndClose()) {
return limiting_reader.status();
}
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <stdint.h>
#include <algorithm>
#include <cstring>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using tensorstore::AllocateArray;
using tensorstore::c_order;
using tensorstore::ContiguousLayoutOrder;
using tensorstore::DataType;
using tensorstore::dtype_v;
using tensorstore::endian;
using tensorstore::fortran_order;
using tensorstore::Index;
using tensorstore::IsContiguousLayout;
using tensorstore::MatchesStatus;
using tensorstore::Result;
using tensorstore::SharedArray;
using tensorstore::span;
using tensorstore::internal::DecodeArrayEndian;
using tensorstore::internal::EncodeArrayEndian;
using tensorstore::internal::FlatCordBuilder;
Result<absl::Cord> EncodeArrayAsCord(SharedArray<const void> array,
endian endianness,
ContiguousLayoutOrder order) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
if (EncodeArrayEndian(array, endianness, order, writer) && writer.Close()) {
return encoded;
}
return writer.status();
}
Result<SharedArray<const void>> DecodeArrayFromCord(
DataType dtype, span<const Index> decoded_shape, absl::Cord encoded,
endian endianness, ContiguousLayoutOrder order) {
riegeli::CordReader reader{&encoded};
return DecodeArrayEndian(reader, dtype, decoded_shape, endianness, order);
}
template <typename T = uint32_t>
SharedArray<const void> MakeTestArray(ContiguousLayoutOrder order = c_order,
Index a = 1000, Index b = 2000) {
auto c_array = AllocateArray<T>({a, b}, order, tensorstore::default_init);
for (Index a_i = 0; a_i < a; ++a_i) {
for (Index b_i = 0; b_i < b; ++b_i) {
c_array(a_i, b_i) = static_cast<T>(a_i * b + b_i);
}
}
return c_array;
}
TEST(EncodeArrayEndianTest, ContiguousLayout) {
auto c_array = MakeTestArray();
auto f_array = tensorstore::MakeCopy(c_array, fortran_order);
Index num_elements = c_array.num_elements();
ASSERT_TRUE(IsContiguousLayout(c_array, c_order));
ASSERT_TRUE(IsContiguousLayout(f_array, fortran_order));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord c_encoded,
EncodeArrayAsCord(c_array, endian::native, c_order));
{
auto flat = c_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(c_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord f_encoded,
EncodeArrayAsCord(f_array, endian::native, fortran_order));
{
auto flat = f_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(f_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(c_array, endian::native, fortran_order));
EXPECT_EQ(f_encoded, encoded);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(f_array, endian::native, c_order));
EXPECT_EQ(c_encoded, encoded);
}
}
Result<SharedArray<const void>> RoundTripArrayViaCord(
SharedArray<const void> array, endian endianness,
ContiguousLayoutOrder order) {
TENSORSTORE_ASSIGN_OR_RETURN(auto encoded,
EncodeArrayAsCord(array, endianness, order));
return DecodeArrayFromCord(array.dtype(), array.shape(), encoded, endianness,
order);
}
template <typename T = uint16_t>
void TestRoundTripNoCopy(ContiguousLayoutOrder order) {
auto orig_array = MakeTestArray<T>(order);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, RoundTripArrayViaCord(orig_array, endian::native, order));
ASSERT_EQ(orig_array.data(), decoded.data());
}
template <typename T = uint16_t>
void TestRoundTripCopy(ContiguousLayoutOrder order, endian endianness) {
auto orig_array = MakeTestArray<T>(order, 2, 3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, RoundTripArrayViaCord(orig_array, endianness, order));
ASSERT_TRUE(tensorstore::AreArraysIdenticallyEqual(orig_array, decoded))
<< "orig_array=" << orig_array << ", decoded=" << decoded;
}
TEST(EncodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded, EncodeArrayAsCord(orig_array, endian::big, c_order));
EXPECT_THAT(encoded.Flatten(), ::testing::ElementsAreArray({
0,
0,
0,
1,
0,
2,
0,
3,
0,
4,
0,
5,
}));
}
TEST(DecodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
std::string encoded{
0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5,
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayFromCord(orig_array.dtype(), orig_array.shape(),
absl::Cord(encoded), endian::big, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrder) {
TestRoundTripNoCopy(c_order);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrderBool) {
TestRoundTripNoCopy<bool>(c_order);
}
TEST(DecodeArrayEndianTest, InvalidBool) {
std::string encoded{0, 1, 2, 1};
EXPECT_THAT(DecodeArrayFromCord(dtype_v<bool>, {{2, 2}}, absl::Cord(encoded),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2; at byte 2"));
}
TEST(DecodeArrayEndianTest, InvalidBoolNoCopy) {
std::string encoded;
FlatCordBuilder builder(1000 * 2000);
std::fill_n(builder.data(), builder.size(), 0);
builder.data()[builder.size() - 1] = 2;
EXPECT_THAT(
DecodeArrayFromCord(dtype_v<bool>, {{1000, 2000}},
std::move(builder).Build(), endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2"));
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyFOrder) {
TestRoundTripNoCopy(fortran_order);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderBig) {
TestRoundTripCopy(c_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderLittle) {
TestRoundTripCopy(c_order, endian::little);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderBig) {
TestRoundTripCopy(fortran_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderLittle) {
TestRoundTripCopy(fortran_order, endian::little);
}
TEST(DecodeArrayEndianTest, StringReader) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5,
};
riegeli::StringReader reader{encoded};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(DecodeArrayEndianTest, LengthTooShort) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Not enough data.*"));
}
TEST(DecodeArrayEndianTest, LengthTooLong) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5, 6,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"End of data expected.*"));
}
TEST(EncodeArrayEndianTest, Zlib) {
auto orig_array = MakeTestArray<uint16_t>(c_order);
absl::Cord encoded;
{
riegeli::ZlibWriter writer{riegeli::CordWriter{&encoded}};
ASSERT_TRUE(EncodeArrayEndian(orig_array, endian::native, c_order, writer));
ASSERT_TRUE(writer.Close());
}
{
riegeli::ZlibReader reader{riegeli::CordReader{encoded}};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(),
orig_array.shape(), endian::native, c_order),
::testing::Optional(orig_array));
}
}
TEST(DecodeArrayEndianTest, Misaligned) {
int a = 1000, b = 2000;
int num_elements = a * b;
  size_t buffer_size = 1000 * 2000 * 2 + 1;
  std::unique_ptr<char[]> source(new char[buffer_size]);
for (int i = 0; i < num_elements; ++i) {
uint16_t x = static_cast<uint16_t>(i);
memcpy(&source[i * 2 + 1], &x, 2);
}
auto cord = absl::MakeCordFromExternal(
std::string_view(source.get() + 1, buffer_size - 1), [] {});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
ASSERT_NE(decoded.data(), &source[1]);
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
TEST(DecodeArrayEndianTest, Fragmented) {
auto c_array = MakeTestArray<uint16_t>();
size_t total_bytes = c_array.num_elements() * c_array.dtype().size();
std::vector<absl::Cord> parts{
absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(c_array.data()),
total_bytes / 2),
[] {}),
absl::MakeCordFromExternal(
std::string_view(
reinterpret_cast<const char*>(c_array.data()) + total_bytes / 2,
total_bytes / 2),
[] {})};
absl::Cord cord = absl::MakeFragmentedCord(parts);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
} |
680 | cpp | google/tensorstore | admission_queue | tensorstore/internal/rate_limiter/admission_queue.cc | tensorstore/internal/rate_limiter/admission_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_RATE_LIMITER_ADMISSION_QUEUE_H_
#define TENSORSTORE_INTERNAL_RATE_LIMITER_ADMISSION_QUEUE_H_
#include <cstddef>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
namespace tensorstore {
namespace internal {
class AdmissionQueue : public RateLimiter {
public:
AdmissionQueue(size_t limit);
~AdmissionQueue() override = default;
size_t limit() const { return limit_; }
size_t in_flight() const {
absl::MutexLock l(&mutex_);
return in_flight_;
}
void Admit(RateLimiterNode* node, RateLimiterNode::StartFn fn) override;
void Finish(RateLimiterNode* node) override;
private:
const size_t limit_;
size_t in_flight_ ABSL_GUARDED_BY(mutex_) = 0;
};
}
}
#endif
#include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <cassert>
#include <limits>
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
namespace tensorstore {
namespace internal {
AdmissionQueue::AdmissionQueue(size_t limit)
: limit_(limit == 0 ? std::numeric_limits<size_t>::max() : limit) {}
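// Starts `fn(node)` immediately when fewer than `limit_` operations are in
// flight; otherwise the node is appended to the intrusive wait list and
// started later by Finish().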
void AdmissionQueue::Admit(RateLimiterNode* node, RateLimiterNode::StartFn fn) {
assert(node->next_ == nullptr);
assert(node->prev_ == nullptr);
assert(node->start_fn_ == nullptr);
node->start_fn_ = fn;
{
absl::MutexLock lock(&mutex_);
if (in_flight_++ >= limit_) {
internal::intrusive_linked_list::InsertBefore(RateLimiterNodeAccessor{},
&head_, node);
return;
}
}
RunStartFunction(node);
}
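// Releases one in-flight slot and, if any node is waiting, removes it from
// the list and runs its start function.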
void AdmissionQueue::Finish(RateLimiterNode* node) {
assert(node->next_ == nullptr);
RateLimiterNode* next_node = nullptr;
{
absl::MutexLock lock(&mutex_);
in_flight_--;
next_node = head_.next_;
if (next_node == &head_) return;
internal::intrusive_linked_list::Remove(RateLimiterNodeAccessor{},
next_node);
}
RunStartFunction(next_node);
}
}
} | #include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::AdmissionQueue;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiterNode;
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
AdmissionQueue* queue_;
ExecutorTask task_;
Node(AdmissionQueue* queue, ExecutorTask task)
: queue_(queue), task_(std::move(task)) {}
~Node() { queue_->Finish(this); }
static void Start(void* task) {
IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
std::move(self->task_)();
}
};
TEST(AdmissionQueueTest, Basic) {
AdmissionQueue queue(1);
std::atomic<size_t> done{0};
EXPECT_EQ(1, queue.limit());
EXPECT_EQ(0, queue.in_flight());
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] { done++; });
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
EXPECT_EQ(100, done);
}
} |
681 | cpp | google/tensorstore | scaling_rate_limiter | tensorstore/internal/rate_limiter/scaling_rate_limiter.cc | tensorstore/internal/rate_limiter/scaling_rate_limiter_test.cc | #ifndef TENSORSTORE_INTERNAL_RATE_LIMITER_SCALING_RATE_LIMITER_H_
#define TENSORSTORE_INTERNAL_RATE_LIMITER_SCALING_RATE_LIMITER_H_
#include <functional>
#include "absl/time/time.h"
#include "tensorstore/internal/rate_limiter/token_bucket_rate_limiter.h"
namespace tensorstore {
namespace internal {
class DoublingRateLimiter : public TokenBucketRateLimiter {
public:
DoublingRateLimiter(double initial_rate, absl::Duration doubling_time);
DoublingRateLimiter(double initial_rate, absl::Duration doubling_time,
std::function<absl::Time()> clock);
~DoublingRateLimiter() override = default;
double initial_rate() const { return initial_rate_; }
absl::Duration doubling_time() const { return doubling_time_; }
double TokensToAdd(absl::Time current, absl::Time previous) const override;
absl::Duration GetSchedulerDelay() const override;
private:
const double initial_rate_;
const absl::Duration doubling_time_;
const double a_;
};
class ConstantRateLimiter : public TokenBucketRateLimiter {
public:
explicit ConstantRateLimiter(double initial_rate);
ConstantRateLimiter(double initial_rate, std::function<absl::Time()> clock);
~ConstantRateLimiter() override = default;
double initial_rate() const { return initial_rate_; }
double TokensToAdd(absl::Time current, absl::Time previous) const override;
absl::Duration GetSchedulerDelay() const override;
private:
const double initial_rate_;
const absl::Duration r_;
};
}
}
#endif
#include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <limits>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/time/time.h"
#include "tensorstore/internal/rate_limiter/token_bucket_rate_limiter.h"
namespace tensorstore {
namespace internal {
namespace {
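// Returns the exponent `a` such that exp(a * t) doubles every
// `doubling_time`, i.e. ln(2) / doubling_time, or 0 when doubling is
// disabled (non-positive or infinite doubling time).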
double GetLogA(absl::Duration doubling_time) {
if (doubling_time <= absl::ZeroDuration() ||
doubling_time == absl::InfiniteDuration()) {
return 0;
}
return 0.69314718055994530941723212145817656 /
absl::ToDoubleSeconds(doubling_time);
}
double GetMaxAvailable(double initial_rate) {
return std::min(initial_rate * 1000.0, 2000.0);
}
}
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
absl::Duration doubling_time)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
initial_rate_(initial_rate),
doubling_time_(doubling_time),
a_(GetLogA(doubling_time)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
ABSL_CHECK_GT(a_, 0);
}
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
absl::Duration doubling_time,
std::function<absl::Time()> clock)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
initial_rate_(initial_rate),
doubling_time_(doubling_time),
a_(GetLogA(doubling_time)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
ABSL_CHECK_GT(a_, 0);
}
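// Tokens added over [previous, current] are the integral of the
// exponentially growing rate initial_rate * exp(a * (t - start_time)),
// i.e. initial_rate * (exp(a*t1) - exp(a*t0)) / a.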
double DoublingRateLimiter::TokensToAdd(absl::Time current,
absl::Time previous) const {
double int_current =
std::exp(a_ * absl::ToDoubleSeconds(current - start_time_));
double int_prev =
std::exp(a_ * absl::ToDoubleSeconds(previous - start_time_));
return initial_rate_ * (int_current - int_prev) / a_;
}
absl::Duration DoublingRateLimiter::GetSchedulerDelay() const {
return absl::Milliseconds(10);
}
ConstantRateLimiter::ConstantRateLimiter(double initial_rate)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
initial_rate_(initial_rate),
r_(absl::Seconds(1.0 / initial_rate)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}
ConstantRateLimiter::ConstantRateLimiter(double initial_rate,
std::function<absl::Time()> clock)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
initial_rate_(initial_rate),
r_(absl::Seconds(1.0 / initial_rate)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}
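// Tokens accrue linearly: initial_rate tokens per second of elapsed time.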
double ConstantRateLimiter::TokensToAdd(absl::Time current,
absl::Time previous) const {
return initial_rate_ * absl::ToDoubleSeconds(current - previous);
}
absl::Duration ConstantRateLimiter::GetSchedulerDelay() const {
return std::max(r_, absl::Milliseconds(10));
}
}
} | #include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::ConstantRateLimiter;
using ::tensorstore::internal::DoublingRateLimiter;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
RateLimiter* queue_;
ExecutorTask task_;
Node(RateLimiter* queue, ExecutorTask task)
: queue_(queue), task_(std::move(task)) {}
~Node() { queue_->Finish(this); }
static void Start(void* task) {
IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
std::move(self->task_)();
}
};
TEST(ConstantRateLimiter, Basic) {
absl::Time now = absl::Now();
ConstantRateLimiter queue(0.2, [&now]() { return now; });
EXPECT_EQ(0.2, queue.initial_rate());
EXPECT_EQ(now, queue.start_time());
EXPECT_EQ(now, queue.last_update());
EXPECT_EQ(0, queue.available());
EXPECT_EQ(0, queue.TokensToAdd(now, now));
EXPECT_EQ(2, queue.TokensToAdd(now + absl::Seconds(10), now));
EXPECT_EQ(60, queue.TokensToAdd(now + absl::Seconds(300), now));
std::atomic<size_t> done{0};
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
done++;
});
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
now += absl::Seconds(10);
queue.PeriodicCallForTesting();
EXPECT_EQ(2, done);
now += absl::Seconds(100);
queue.PeriodicCallForTesting();
EXPECT_EQ(22, done);
now += absl::Seconds(400);
queue.PeriodicCallForTesting();
EXPECT_EQ(100, done);
}
TEST(DoublingRateLimiter, Basic) {
absl::Time now = absl::Now();
DoublingRateLimiter queue(2, absl::Seconds(10), [&now]() { return now; });
EXPECT_EQ(2, queue.initial_rate());
EXPECT_EQ(absl::Seconds(10), queue.doubling_time());
EXPECT_EQ(0, queue.available());
EXPECT_EQ(0, queue.TokensToAdd(now, now));
EXPECT_THAT(
queue.TokensToAdd(now + absl::Seconds(11), now + absl::Seconds(10)),
::testing::Gt(4));
EXPECT_THAT(
queue.TokensToAdd(now + absl::Seconds(21), now + absl::Seconds(20)),
::testing::Gt(8));
std::atomic<size_t> done{0};
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
done++;
});
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
EXPECT_EQ(0, done);
now += absl::Seconds(1);
queue.PeriodicCallForTesting();
EXPECT_EQ(2, done);
now += absl::Seconds(10);
queue.PeriodicCallForTesting();
EXPECT_EQ(32, done);
now += absl::Seconds(20);
queue.PeriodicCallForTesting();
EXPECT_EQ(100, done);
}
} |
682 | cpp | google/tensorstore | http_request | tensorstore/internal/http/http_request.cc | tensorstore/internal/http/http_request_test.cc | #ifndef TENSORSTORE_INTERNAL_HTTP_HTTP_REQUEST_H_
#define TENSORSTORE_INTERNAL_HTTP_HTTP_REQUEST_H_
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
namespace tensorstore {
namespace internal_http {
struct HttpRequest {
std::string method;
std::string url;
std::string user_agent = {};
std::vector<std::string> headers = {};
bool accept_encoding = false;
template <typename Sink>
friend void AbslStringify(Sink& sink, const HttpRequest& request) {
absl::Format(&sink, "HttpRequest{%s %s user_agent=%s, headers=<",
request.method, request.url, request.user_agent);
const char* sep = "";
for (const auto& v : request.headers) {
sink.Append(sep);
#ifndef NDEBUG
if (absl::StartsWithIgnoreCase(v, "authorization:")) {
sink.Append(std::string_view(v).substr(0, 25));
sink.Append("#####");
} else
#endif
{
sink.Append(v);
}
sep = " ";
}
sink.Append(">}");
}
};
std::optional<std::string> FormatRangeHeader(
OptionalByteRangeRequest byte_range);
std::optional<std::string> FormatCacheControlMaxAgeHeader(
absl::Duration max_age);
std::optional<std::string> FormatStalenessBoundCacheControlHeader(
absl::Time staleness_bound);
class HttpRequestBuilder {
public:
using UriEncodeFunctor = absl::FunctionRef<std::string(std::string_view)>;
HttpRequestBuilder(std::string_view method, std::string base_url)
: HttpRequestBuilder(method, base_url,
internal::PercentEncodeUriComponent) {}
HttpRequestBuilder(std::string_view method, std::string base_url,
UriEncodeFunctor uri_encoder);
HttpRequest BuildRequest();
HttpRequestBuilder& AddQueryParameter(std::string_view key,
std::string_view value);
HttpRequestBuilder& EnableAcceptEncoding();
HttpRequestBuilder& AddHeader(std::string header);
HttpRequestBuilder& AddHeader(std::string_view header) {
return header.empty() ? *this : AddHeader(std::string(header));
}
HttpRequestBuilder& AddHeader(const char* header) {
return AddHeader(std::string_view(header));
}
HttpRequestBuilder& AddHeader(std::optional<std::string> header) {
return header ? AddHeader(std::move(*header)) : *this;
}
HttpRequestBuilder& MaybeAddRangeHeader(OptionalByteRangeRequest byte_range);
HttpRequestBuilder& MaybeAddCacheControlMaxAgeHeader(absl::Duration max_age);
HttpRequestBuilder& MaybeAddStalenessBoundCacheControlHeader(
absl::Time staleness_bound);
HttpRequestBuilder& AddHostHeader(std::string_view host);
private:
absl::FunctionRef<std::string(std::string_view)> uri_encoder_;
HttpRequest request_;
char const* query_parameter_separator_;
};
}
}
#endif
#include "tensorstore/internal/http/http_request.h"
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
namespace tensorstore {
namespace internal_http {
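// Formats an HTTP "Range: bytes=..." header for the given byte range
// request, or returns nullopt when the entire object is being requested.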
std::optional<std::string> FormatRangeHeader(
OptionalByteRangeRequest byte_range) {
assert(byte_range.SatisfiesInvariants());
if (byte_range.IsRange() &&
byte_range.exclusive_max > byte_range.inclusive_min) {
return absl::StrFormat("Range: bytes=%d-%d", byte_range.inclusive_min,
byte_range.exclusive_max - 1);
}
if (byte_range.IsSuffix()) {
return absl::StrFormat("Range: bytes=%d-", byte_range.inclusive_min);
}
if (byte_range.IsSuffixLength()) {
return absl::StrFormat("Range: bytes=%d", byte_range.inclusive_min);
}
return std::nullopt;
}
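// Formats "cache-control: max-age=N" when max_age is at least one second,
// "cache-control: no-cache" otherwise; an infinite max_age yields nullopt.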
std::optional<std::string> FormatCacheControlMaxAgeHeader(
absl::Duration max_age) {
if (max_age >= absl::InfiniteDuration()) {
return std::nullopt;
}
auto max_age_seconds = absl::ToInt64Seconds(max_age);
if (max_age_seconds > 0) {
return absl::StrFormat("cache-control: max-age=%d", max_age_seconds);
} else {
return "cache-control: no-cache";
}
}
std::optional<std::string> FormatStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
if (staleness_bound == absl::InfinitePast()) {
return std::nullopt;
}
absl::Time now;
absl::Duration duration = absl::ZeroDuration();
if (staleness_bound != absl::InfiniteFuture() &&
(now = absl::Now()) > staleness_bound) {
duration = now - staleness_bound;
}
return FormatCacheControlMaxAgeHeader(duration);
}
HttpRequestBuilder::HttpRequestBuilder(
std::string_view method, std::string base_url,
absl::FunctionRef<std::string(std::string_view)> uri_encoder)
: uri_encoder_(uri_encoder),
request_{std::string(method), std::move(base_url)},
query_parameter_separator_("?") {
assert(!request_.method.empty());
assert(request_.method ==
absl::AsciiStrToUpper(std::string_view(request_.method)));
if (request_.url.find_last_of('?') != std::string::npos) {
query_parameter_separator_ = "&";
}
}
HttpRequest HttpRequestBuilder::BuildRequest() { return std::move(request_); }
HttpRequestBuilder& HttpRequestBuilder::AddHeader(std::string header) {
if (!header.empty()) {
request_.headers.push_back(std::move(header));
}
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::AddQueryParameter(
std::string_view key, std::string_view value) {
assert(!key.empty());
if (value.empty()) {
absl::StrAppend(&request_.url, query_parameter_separator_,
uri_encoder_(key));
} else {
absl::StrAppend(&request_.url, query_parameter_separator_,
uri_encoder_(key), "=", uri_encoder_(value));
}
query_parameter_separator_ = "&";
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::EnableAcceptEncoding() {
request_.accept_encoding = true;
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddRangeHeader(
OptionalByteRangeRequest byte_range) {
return AddHeader(FormatRangeHeader(std::move(byte_range)));
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddCacheControlMaxAgeHeader(
absl::Duration max_age) {
return AddHeader(FormatCacheControlMaxAgeHeader(max_age));
}
HttpRequestBuilder&
HttpRequestBuilder::MaybeAddStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
return AddHeader(FormatStalenessBoundCacheControlHeader(staleness_bound));
}
HttpRequestBuilder& HttpRequestBuilder::AddHostHeader(std::string_view host) {
if (host.empty()) {
host = internal::ParseGenericUri(request_.url).authority;
}
return AddHeader(absl::StrFormat("host: %s", host));
}
}
} | #include "tensorstore/internal/http/http_request.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/byte_range.h"
namespace {
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::testing::AnyOf;
using ::testing::ElementsAre;
TEST(HttpRequestBuilder, BuildRequest) {
auto request = HttpRequestBuilder("GET", "http:
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest();
EXPECT_EQ("http:
EXPECT_TRUE(request.accept_encoding);
EXPECT_EQ("GET", request.method);
EXPECT_THAT(request.headers, testing::ElementsAre("X-foo: bar"));
}
TEST(HttpRequestBuilder, AddCacheControlMaxAgeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::InfiniteDuration());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::ZeroDuration());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: max-age=10"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(-absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
}
TEST(HttpRequestBuilder, AddStalenessBoundCacheControlHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfinitePast());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfiniteFuture());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
const absl::Time kFutureTime = absl::Now() + absl::Minutes(525600);
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(kFutureTime);
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::Now() -
absl::Milliseconds(5900));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre(AnyOf("cache-control: max-age=4",
"cache-control: max-age=5")));
}
}
TEST(HttpRequestBuilder, MaybeAddRangeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::Suffix(1));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=1-"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::SuffixLength(5));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=-5"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest{1, 2});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("Range: bytes=1-1"));
}
}
TEST(HttpRequestBuilder, AddHostHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("host: 127.0.0.1"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader("host.header");
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: host.header"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: localhost:1234"));
}
}
} |
683 | cpp | google/tensorstore | http_response | tensorstore/internal/http/http_response.cc | tensorstore/internal/http/http_response_test.cc | #ifndef TENSORSTORE_INTERNAL_HTTP_HTTP_RESPONSE_H_
#define TENSORSTORE_INTERNAL_HTTP_HTTP_RESPONSE_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_http {
struct HttpResponse {
int32_t status_code;
absl::Cord payload;
absl::btree_multimap<std::string, std::string> headers;
template <typename Sink>
friend void AbslStringify(Sink& sink, const HttpResponse& response) {
absl::Format(&sink, "HttpResponse{code=%d, headers=<",
response.status_code);
const char* sep = "";
for (const auto& kv : response.headers) {
sink.Append(sep);
sink.Append(kv.first);
sink.Append(": ");
#ifndef NDEBUG
if (absl::StrContainsIgnoreCase(kv.first, "auth_token")) {
sink.Append("#####");
} else
#endif
{
sink.Append(kv.second);
}
sep = " ";
}
if (response.payload.size() <= 64) {
absl::Format(&sink, ">, payload=%v}", response.payload);
} else {
absl::Format(&sink, ">, payload.size=%d}", response.payload.size());
}
}
};
const char* HttpResponseCodeToMessage(const HttpResponse& response);
absl::StatusCode HttpResponseCodeToStatusCode(const HttpResponse& response);
absl::Status HttpResponseCodeToStatus(
const HttpResponse& response,
SourceLocation loc = ::tensorstore::SourceLocation::current());
struct ParsedContentRange {
int64_t inclusive_min;
int64_t exclusive_max;
int64_t total_size;
};
Result<ParsedContentRange> ParseContentRangeHeader(
const HttpResponse& response);
}
}
#endif
#include "tensorstore/internal/http/http_response.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "re2/re2.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
const char* HttpResponseCodeToMessage(const HttpResponse& response) {
switch (response.status_code) {
case 400:
return "Bad Request";
case 401:
return "Unauthorized";
case 402:
return "Payment Required";
case 403:
return "Forbidden";
case 404:
return "Not Found";
case 405:
return "Method Not Allowed";
case 406:
return "Not Acceptable";
case 407:
return "Proxy Authentication Required";
case 408:
return "Request Timeout";
case 409:
return "Conflict";
case 410:
return "Gone";
case 411:
return "Length Required";
case 412:
return "Precondition Failed";
case 413:
return "Payload Too Large";
case 414:
return "URI Too Long";
case 415:
return "Unsupported Media Type";
case 416:
return "Range Not Satisfiable";
case 417:
return "Expectation Failed";
case 418:
return "I'm a teapot";
case 421:
return "Misdirected Request";
case 422:
return "Unprocessable Content";
case 423:
return "Locked";
case 424:
return "Failed Dependency";
case 425:
return "Too Early";
case 426:
return "Upgrade Required";
case 428:
return "Precondition Required";
case 429:
return "Too Many Requests";
case 431:
return "Request Header Fields Too Large";
case 451:
return "Unavailable For Legal Reasons";
case 500:
return "Internal Server Error";
case 501:
return "Not Implemented";
case 502:
return "Bad Gateway";
case 503:
return "Service Unavailable";
case 504:
return "Gateway Timeout";
case 505:
return "HTTP Version Not Supported";
case 506:
return "Variant Also Negotiates";
case 507:
return "Insufficient Storage";
case 508:
return "Loop Detected";
case 510:
return "Not Extended";
case 511:
return "Network Authentication Required";
default:
return nullptr;
}
}
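// Maps an HTTP status code to an absl::StatusCode. Unlisted codes below 300
// map to kOk; unrecognized codes of 300 and above map to kUnknown.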
absl::StatusCode HttpResponseCodeToStatusCode(const HttpResponse& response) {
switch (response.status_code) {
case 200:
case 201:
case 202:
case 204:
case 206:
return absl::StatusCode::kOk;
case 400:
case 411:
return absl::StatusCode::kInvalidArgument;
case 401:
case 403:
return absl::StatusCode::kPermissionDenied;
case 404:
case 410:
return absl::StatusCode::kNotFound;
case 302:
case 303:
case 304:
case 307:
case 412:
case 413:
return absl::StatusCode::kFailedPrecondition;
case 416:
return absl::StatusCode::kOutOfRange;
case 308:
case 408:
case 409:
case 429:
case 500:
case 502:
case 503:
case 504:
return absl::StatusCode::kUnavailable;
}
if (response.status_code < 300) {
return absl::StatusCode::kOk;
}
return absl::StatusCode::kUnknown;
}
absl::Status HttpResponseCodeToStatus(const HttpResponse& response,
SourceLocation loc) {
auto code = HttpResponseCodeToStatusCode(response);
if (code == absl::StatusCode::kOk) {
return absl::OkStatus();
}
auto status_message = HttpResponseCodeToMessage(response);
if (!status_message) status_message = "Unknown";
absl::Status status(code, status_message);
if (!response.payload.empty()) {
status.SetPayload(
"http_response_body",
response.payload.Subcord(
0, response.payload.size() < 256 ? response.payload.size() : 256));
}
MaybeAddSourceLocation(status, loc);
status.SetPayload("http_response_code",
absl::Cord(tensorstore::StrCat(response.status_code)));
return status;
}
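// Parses a "content-range: bytes a-b/total" header into the half-open range
// [a, b+1) plus the total size (-1 when the total is "*"). Fails if the
// header is missing or malformed.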
Result<ParsedContentRange> ParseContentRangeHeader(
const HttpResponse& response) {
auto it = response.headers.find("content-range");
if (it == response.headers.end()) {
if (response.status_code != 206) {
return absl::FailedPreconditionError(
tensorstore::StrCat("No Content-Range header expected with HTTP ",
response.status_code, " response"));
}
return absl::FailedPreconditionError(
"Expected Content-Range header with HTTP 206 response");
}
static const RE2 kContentRangeRegex(R"(^bytes (\d+)-(\d+)/(?:(\d+)|\*))");
int64_t a, b;
std::optional<int64_t> total_size;
if (!RE2::FullMatch(it->second, kContentRangeRegex, &a, &b, &total_size) ||
a > b || (total_size && b >= *total_size) ||
b == std::numeric_limits<int64_t>::max()) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Unexpected Content-Range header received: ", QuoteString(it->second)));
}
return ParsedContentRange{a, b + 1, total_size.value_or(-1)};
}
}
} | #include "tensorstore/internal/http/http_response.h"
#include <set>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::internal_http::HttpResponse;
TEST(HttpResponseCodeToStatusTest, AllCodes) {
using ::tensorstore::internal_http::HttpResponseCodeToStatus;
absl::flat_hash_set<int> seen;
for (auto code : {200, 201, 204, 206}) {
seen.insert(code);
EXPECT_TRUE(HttpResponseCodeToStatus({code, {}, {}}).ok()) << code;
}
for (auto code : {400, 411}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {401, 403}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kPermissionDenied,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {404, 410}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kNotFound,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {302, 303, 304, 307, 412, 413}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kFailedPrecondition,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {416}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kOutOfRange,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {308, 408, 409, 429, 500, 502, 503, 504}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kUnavailable,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (int i = 300; i < 600; i++) {
if (seen.count(i) > 0) continue;
EXPECT_EQ(absl::StatusCode::kUnknown,
HttpResponseCodeToStatus({i, {}, {}}).code())
<< i;
}
}
} |
684 | cpp | google/tensorstore | curl_wrappers | tensorstore/internal/http/curl_wrappers.cc | tensorstore/internal/http/curl_wrappers_test.cc | #ifndef TENSORSTORE_INTERNAL_HTTP_CURL_WRAPPERS_H_
#define TENSORSTORE_INTERNAL_HTTP_CURL_WRAPPERS_H_
#include <memory>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include <curl/curl.h>
#include "tensorstore/internal/source_location.h"
namespace tensorstore {
namespace internal_http {
struct CurlPtrCleanup {
void operator()(CURL*);
};
struct CurlMultiCleanup {
void operator()(CURLM*);
};
struct CurlSlistCleanup {
void operator()(curl_slist*);
};
using CurlPtr = std::unique_ptr<CURL, CurlPtrCleanup>;
using CurlMulti = std::unique_ptr<CURLM, CurlMultiCleanup>;
using CurlHeaders = std::unique_ptr<curl_slist, CurlSlistCleanup>;
std::string GetCurlUserAgentSuffix();
absl::Status CurlCodeToStatus(
CURLcode code, std::string_view detail,
SourceLocation loc = tensorstore::SourceLocation::current());
absl::Status CurlMCodeToStatus(
CURLMcode code, std::string_view,
SourceLocation loc = tensorstore::SourceLocation::current());
}
}
#endif
#include "tensorstore/internal/http/curl_wrappers.h"
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <curl/curl.h>
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
void CurlPtrCleanup::operator()(CURL* c) { curl_easy_cleanup(c); }
void CurlMultiCleanup::operator()(CURLM* m) { curl_multi_cleanup(m); }
void CurlSlistCleanup::operator()(curl_slist* s) { curl_slist_free_all(s); }
std::string GetCurlUserAgentSuffix() {
static std::string agent =
tensorstore::StrCat("tensorstore/0.1 ", curl_version());
return agent;
}
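// Maps a libcurl easy-interface result code to an absl::Status, attaching
// the numeric code as a "curl_code" payload.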
absl::Status CurlCodeToStatus(CURLcode code, std::string_view detail,
SourceLocation loc) {
auto error_code = absl::StatusCode::kUnknown;
switch (code) {
case CURLE_OK:
return absl::OkStatus();
case CURLE_COULDNT_RESOLVE_PROXY:
error_code = absl::StatusCode::kUnavailable;
if (detail.empty()) detail = "Failed to resolve proxy";
break;
case CURLE_OPERATION_TIMEDOUT:
error_code = absl::StatusCode::kDeadlineExceeded;
if (detail.empty()) detail = "Timed out";
break;
case CURLE_COULDNT_CONNECT:
case CURLE_COULDNT_RESOLVE_HOST:
case CURLE_GOT_NOTHING:
case CURLE_HTTP2:
case CURLE_HTTP2_STREAM:
case CURLE_PARTIAL_FILE:
case CURLE_RECV_ERROR:
case CURLE_SEND_ERROR:
case CURLE_SSL_CONNECT_ERROR:
case CURLE_UNSUPPORTED_PROTOCOL:
error_code = absl::StatusCode::kUnavailable;
break;
case CURLE_URL_MALFORMAT:
error_code = absl::StatusCode::kInvalidArgument;
break;
case CURLE_WRITE_ERROR:
error_code = absl::StatusCode::kCancelled;
break;
case CURLE_ABORTED_BY_CALLBACK:
error_code = absl::StatusCode::kAborted;
break;
case CURLE_REMOTE_ACCESS_DENIED:
error_code = absl::StatusCode::kPermissionDenied;
break;
case CURLE_SEND_FAIL_REWIND:
case CURLE_RANGE_ERROR:
error_code = absl::StatusCode::kInternal;
break;
case CURLE_BAD_FUNCTION_ARGUMENT:
case CURLE_OUT_OF_MEMORY:
case CURLE_NOT_BUILT_IN:
case CURLE_UNKNOWN_OPTION:
case CURLE_BAD_DOWNLOAD_RESUME:
error_code = absl::StatusCode::kInternal;
break;
default:
break;
}
absl::Status status(
error_code, tensorstore::StrCat("CURL error ", curl_easy_strerror(code),
detail.empty() ? "" : ": ", detail));
status.SetPayload("curl_code", absl::Cord(tensorstore::StrCat(code)));
MaybeAddSourceLocation(status, loc);
return status;
}
absl::Status CurlMCodeToStatus(CURLMcode code, std::string_view detail,
SourceLocation loc) {
if (code == CURLM_OK) {
return absl::OkStatus();
}
absl::Status status(
absl::StatusCode::kInternal,
tensorstore::StrCat("CURLM error ", curl_multi_strerror(code),
detail.empty() ? "" : ": ", detail));
status.SetPayload("curlm_code", absl::Cord(tensorstore::StrCat(code)));
MaybeAddSourceLocation(status, loc);
return status;
}
}
} | #include "tensorstore/internal/http/curl_wrappers.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_http::CurlCodeToStatus;
using ::tensorstore::internal_http::CurlMCodeToStatus;
TEST(CurlFactoryTest, CurlCodeToStatus) {
struct {
CURLcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLE_OK, absl::StatusCode::kOk},
{CURLE_RECV_ERROR, absl::StatusCode::kUnavailable},
{CURLE_SEND_ERROR, absl::StatusCode::kUnavailable},
{CURLE_PARTIAL_FILE, absl::StatusCode::kUnavailable},
{CURLE_SSL_CONNECT_ERROR, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_HOST, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_PROXY, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_CONNECT, absl::StatusCode::kUnavailable},
{CURLE_REMOTE_ACCESS_DENIED, absl::StatusCode::kPermissionDenied},
{CURLE_OPERATION_TIMEDOUT, absl::StatusCode::kDeadlineExceeded},
{CURLE_ABORTED_BY_CALLBACK, absl::StatusCode::kAborted},
{CURLE_FAILED_INIT, absl::StatusCode::kUnknown},
{CURLE_GOT_NOTHING, absl::StatusCode::kUnavailable},
{CURLE_AGAIN, absl::StatusCode::kUnknown},
{CURLE_HTTP2, absl::StatusCode::kUnavailable},
{CURLE_BAD_DOWNLOAD_RESUME, absl::StatusCode::kInternal},
{CURLE_RANGE_ERROR, absl::StatusCode::kInternal},
{CURLE_UNSUPPORTED_PROTOCOL, absl::StatusCode::kUnavailable},
};
for (auto const& t : expected_codes) {
auto actual = CurlCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURL code=" << t.curl;
}
}
TEST(CurlFactoryTest, CurlMCodeToStatus) {
struct {
CURLMcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLM_OK, absl::StatusCode::kOk},
{CURLM_BAD_HANDLE, absl::StatusCode::kInternal},
{CURLM_BAD_EASY_HANDLE, absl::StatusCode::kInternal},
{CURLM_OUT_OF_MEMORY, absl::StatusCode::kInternal},
{CURLM_INTERNAL_ERROR, absl::StatusCode::kInternal},
};
for (auto const& t : expected_codes) {
auto actual = CurlMCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURLM code=" << t.curl;
}
}
} |
685 | cpp | google/tensorstore | http_header | tensorstore/internal/http/http_header.cc | tensorstore/internal/http/http_header_test.cc | #ifndef TENSORSTORE_INTERNAL_HTTP_HTTP_HEADER_H_
#define TENSORSTORE_INTERNAL_HTTP_HTTP_HEADER_H_
#include <stddef.h>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
namespace tensorstore {
namespace internal_http {
constexpr const char kHttpTimeFormat[] = "%a, %d %b %E4Y %H:%M:%S GMT";
absl::Status ValidateHttpHeader(std::string_view header);
size_t AppendHeaderData(absl::btree_multimap<std::string, std::string>& headers,
std::string_view data);
std::optional<std::tuple<size_t, size_t, size_t>> TryParseContentRangeHeader(
const absl::btree_multimap<std::string, std::string>& headers);
template <typename T>
std::optional<T> TryParseIntHeader(
const absl::btree_multimap<std::string, std::string>& headers,
std::string_view header) {
auto it = headers.find(header);
T result;
if (it != headers.end() && absl::SimpleAtoi(it->second, &result)) {
return result;
}
return std::nullopt;
}
std::optional<bool> TryParseBoolHeader(
const absl::btree_multimap<std::string, std::string>& headers,
std::string_view header);
std::optional<size_t> TryGetContentLength(
const absl::btree_multimap<std::string, std::string>& headers);
}
}
#endif
#include "tensorstore/internal/http/http_header.h"
#include <stddef.h>
#include <iterator>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
namespace {
static inline constexpr internal::AsciiSet kTChar{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
R"(!#$%&'*+-.)"};
inline bool IsTchar(char ch) { return kTChar.Test(ch); }
inline bool IsOWS(char ch) { return ch == ' ' || ch == '\t'; }
}
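// Validates that `header` is a well-formed "field-name: field-value" line:
// token characters for the name, and tab / visible ASCII / obs-text bytes
// for the value.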
absl::Status ValidateHttpHeader(std::string_view header) {
static LazyRE2 kHeaderPattern = {
"[!#\\$%&'*+\\-\\.\\^_`|~0-9a-zA-Z]+"
":"
"[\t\x20-\x7e\x80-\xff]*",
RE2::Latin1};
if (!RE2::FullMatch(header, *kHeaderPattern)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid HTTP header: ", tensorstore::QuoteString(header)));
}
return absl::OkStatus();
}
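// Parses zero or more "name: value\r\n" lines (as delivered by a curl header
// callback) into `headers`, lower-casing names and trimming optional
// whitespace. Always returns data.size() as the number of bytes consumed.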
size_t AppendHeaderData(absl::btree_multimap<std::string, std::string>& headers,
std::string_view data) {
if (data.empty() || *data.rbegin() != '\n') return data.size();
for (std::string_view field : absl::StrSplit(data, '\n', absl::SkipEmpty())) {
if (field.empty() || *field.rbegin() != '\r') break;
field.remove_suffix(1);
while (!field.empty() && IsOWS(*field.rbegin())) field.remove_suffix(1);
if (field.empty()) continue;
auto it = field.begin();
for (; it != field.end() && IsTchar(*it); ++it) {
}
if (it == field.begin() || it == field.end() || *it != ':') {
continue;
}
std::string field_name = absl::AsciiStrToLower(
std::string_view(field.data(), std::distance(field.begin(), it)));
field.remove_prefix(field_name.size() + 1);
while (!field.empty() && IsOWS(*field.begin())) field.remove_prefix(1);
headers.emplace(std::move(field_name), std::string(field));
}
return data.size();
}
std::optional<std::tuple<size_t, size_t, size_t>> TryParseContentRangeHeader(
const absl::btree_multimap<std::string, std::string>& headers) {
auto it = headers.find("content-range");
if (it == headers.end()) {
return std::nullopt;
}
static LazyRE2 kContentRange1 = {R"(^bytes (\d+)-(\d+)/(\d+))"};
static LazyRE2 kContentRange2 = {R"(^bytes (\d+)-(\d+)(/[*])?)"};
std::tuple<size_t, size_t, size_t> result(0, 0, 0);
if (RE2::FullMatch(it->second, *kContentRange1, &std::get<0>(result),
&std::get<1>(result), &std::get<2>(result))) {
return result;
}
if (RE2::FullMatch(it->second, *kContentRange2, &std::get<0>(result),
&std::get<1>(result))) {
return result;
}
return std::nullopt;
}
std::optional<bool> TryParseBoolHeader(
const absl::btree_multimap<std::string, std::string>& headers,
std::string_view header) {
auto it = headers.find(header);
bool result;
if (it != headers.end() && absl::SimpleAtob(it->second, &result)) {
return result;
}
return std::nullopt;
}
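// Derives the payload size from the content-length header (only when no
// transfer-encoding or content-encoding is present), falling back to the
// extent implied by the content-range header.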
std::optional<size_t> TryGetContentLength(
const absl::btree_multimap<std::string, std::string>& headers) {
std::optional<size_t> content_length;
if (headers.find("transfer-encoding") == headers.end() &&
headers.find("content-encoding") == headers.end()) {
content_length = TryParseIntHeader<size_t>(headers, "content-length");
}
if (!content_length) {
auto content_range = TryParseContentRangeHeader(headers);
if (content_range) {
content_length =
1 + std::get<1>(*content_range) - std::get<0>(*content_range);
}
}
return content_length;
}
}
} | #include "tensorstore/internal/http/http_header.h"
#include <stddef.h>
#include <optional>
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_http::AppendHeaderData;
using ::tensorstore::internal_http::TryParseBoolHeader;
using ::tensorstore::internal_http::TryParseContentRangeHeader;
using ::tensorstore::internal_http::TryParseIntHeader;
using ::tensorstore::internal_http::ValidateHttpHeader;
TEST(ValidateHttpHeaderTest, Valid) {
TENSORSTORE_EXPECT_OK(ValidateHttpHeader("a!#$%&'*+-.^_`|~3X: b\xfe"));
}
TEST(ValidateHttpHeaderTest, Invalid) {
EXPECT_THAT(ValidateHttpHeader("a"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ValidateHttpHeader("a: \n"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(AppendHeaderData, BadHeaders) {
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(0, AppendHeaderData(headers, ""));
EXPECT_EQ(2, AppendHeaderData(headers, "\r\n"));
EXPECT_EQ(8, AppendHeaderData(headers, "foo: bar"));
EXPECT_EQ(5, AppendHeaderData(headers, "foo\r\n"));
EXPECT_EQ(7, AppendHeaderData(headers, "fo@: \r\n"));
EXPECT_TRUE(headers.empty());
}
TEST(AppendHeaderData, GoodHeaders) {
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(10, AppendHeaderData(headers, "bar: baz\r\n"));
EXPECT_FALSE(headers.empty());
ASSERT_EQ(1, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("baz", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(6, AppendHeaderData(headers, "foo:\r\n"));
ASSERT_EQ(1, headers.count("foo"));
auto range = headers.equal_range("foo");
EXPECT_EQ("", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(16, AppendHeaderData(headers, "bAr: \t baz \t\r\n"));
ASSERT_EQ(1, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("baz", range.first->second);
}
{
absl::btree_multimap<std::string, std::string> headers;
EXPECT_EQ(16, AppendHeaderData(headers, "bAr: \t one \t\r\n"));
EXPECT_EQ(10, AppendHeaderData(headers, "bar: two\r\n"));
ASSERT_EQ(2, headers.count("bar"));
auto range = headers.equal_range("bar");
EXPECT_EQ("one", range.first->second);
++range.first;
EXPECT_EQ("two", range.first->second);
}
}
TEST(TryParse, ContentRangeHeader) {
EXPECT_THAT(
TryParseContentRangeHeader({{"content-range", "bytes 10-20/100"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 100))));
EXPECT_THAT(TryParseContentRangeHeader({{"content-range", "bytes 10-20/*"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 0))));
EXPECT_THAT(TryParseContentRangeHeader({{"content-range", "bytes 10-20"}}),
::testing::Optional(
testing::Eq(std::tuple<size_t, size_t, size_t>(10, 20, 0))));
EXPECT_THAT(
TryParseContentRangeHeader({{"content-range", "bytes 1-abc/100"}}),
::testing::Eq(std::nullopt));
}
TEST(TryParse, BoolHeader) {
EXPECT_THAT(TryParseBoolHeader({{"bool-header", "true"}}, "bool-header"),
::testing::Optional(testing::Eq(true)));
}
TEST(TryParse, IntHeader) {
EXPECT_THAT(TryParseIntHeader<size_t>({{"int-header", "100"}}, "int-header"),
::testing::Optional(testing::Eq(100)));
}
} |
686 | cpp | google/tensorstore | curl_transport | tensorstore/internal/http/curl_transport.cc | tensorstore/internal/http/curl_transport_test.cc | #ifndef TENSORSTORE_INTERNAL_HTTP_CURL_TRANSPORT_H_
#define TENSORSTORE_INTERNAL_HTTP_CURL_TRANSPORT_H_
#include <memory>
#include "tensorstore/internal/http/curl_factory.h"
#include "tensorstore/internal/http/curl_handle.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
namespace tensorstore {
namespace internal_http {
void InitializeCurlHandle(CURL* handle);
class CurlTransport : public HttpTransport {
public:
explicit CurlTransport(std::shared_ptr<CurlHandleFactory> factory);
~CurlTransport() override;
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override;
private:
class Impl;
std::shared_ptr<Impl> impl_;
};
std::shared_ptr<HttpTransport> GetDefaultHttpTransport();
void SetDefaultHttpTransport(std::shared_ptr<HttpTransport> t);
}
}
#endif
#include "tensorstore/internal/http/curl_transport.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <curl/curl.h>
#include "tensorstore/internal/container/circular_queue.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_factory.h"
#include "tensorstore/internal/http/curl_handle.h"
#include "tensorstore/internal/http/curl_wrappers.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/thread/thread.h"
ABSL_FLAG(std::optional<uint32_t>, tensorstore_http_threads, std::nullopt,
"Threads to use for http requests. "
"Overrides TENSORSTORE_HTTP_THREADS.");
namespace tensorstore {
namespace internal_http {
namespace {
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_container::CircularQueue;
auto& http_request_started = internal_metrics::Counter<int64_t>::New(
"/tensorstore/http/request_started", "HTTP requests started");
auto& http_request_completed = internal_metrics::Counter<int64_t>::New(
"/tensorstore/http/request_completed", "HTTP requests completed");
auto& http_request_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/request_bytes", "HTTP request bytes transmitted");
auto& http_request_header_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/request_header_bytes",
"HTTP request bytes transmitted");
auto& http_response_codes = internal_metrics::Counter<int64_t, int>::New(
"/tensorstore/http/response_codes", "code",
"HTTP response status code counts");
auto& http_response_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/response_bytes", "HTTP response bytes received");
auto& http_active = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/http/active", "HTTP requests considered active");
auto& http_total_time_ms =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/total_time_ms", "HTTP total latency (ms)");
auto& http_first_byte_latency_us =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/first_byte_latency_us",
"HTTP first byte received latency (us)");
auto& http_poll_time_ns =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/http_poll_time_ns",
"HTTP time spent in curl_multi_poll (ns)");
uint32_t GetHttpThreads() {
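  // Thread count from --tensorstore_http_threads or the TENSORSTORE_HTTP_THREADS
  // environment variable; defaults to 4 and is clamped to at least 1.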
return std::max(1u, GetFlagOrEnvValue(FLAGS_tensorstore_http_threads,
"TENSORSTORE_HTTP_THREADS")
.value_or(4u));
}
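// Per-request libcurl state: owns the easy handle, request headers, and the
// outgoing payload cursor, and forwards status/header/body callbacks to the
// HttpResponseHandler.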
struct CurlRequestState {
std::shared_ptr<CurlHandleFactory> factory_;
CurlHandle handle_;
CurlHeaders headers_;
absl::Cord payload_;
absl::Cord::CharIterator payload_it_;
size_t payload_remaining_;
HttpResponseHandler* response_handler_ = nullptr;
size_t response_payload_size_ = 0;
bool status_set = false;
char error_buffer_[CURL_ERROR_SIZE];
CurlRequestState(std::shared_ptr<CurlHandleFactory> factory)
: factory_(std::move(factory)), handle_(CurlHandle::Create(*factory_)) {
error_buffer_[0] = 0;
handle_.SetOption(CURLOPT_ERRORBUFFER, error_buffer_);
handle_.SetOption(CURLOPT_BUFFERSIZE, 512 * 1024);
handle_.SetOption(CURLOPT_TCP_NODELAY, 1L);
handle_.SetOption(CURLOPT_WRITEDATA, this);
handle_.SetOption(CURLOPT_WRITEFUNCTION,
&CurlRequestState::CurlWriteCallback);
handle_.SetOption(CURLOPT_HEADERDATA, this);
handle_.SetOption(CURLOPT_HEADERFUNCTION,
&CurlRequestState::CurlHeaderCallback);
}
~CurlRequestState() {
handle_.SetOption(CURLOPT_WRITEDATA, nullptr);
handle_.SetOption(CURLOPT_WRITEFUNCTION, nullptr);
handle_.SetOption(CURLOPT_READDATA, nullptr);
handle_.SetOption(CURLOPT_READFUNCTION, nullptr);
handle_.SetOption(CURLOPT_SEEKDATA, nullptr);
handle_.SetOption(CURLOPT_SEEKFUNCTION, nullptr);
handle_.SetOption(CURLOPT_HEADERDATA, nullptr);
handle_.SetOption(CURLOPT_HEADERFUNCTION, nullptr);
handle_.SetOption(CURLOPT_ERRORBUFFER, nullptr);
CurlHandle::Cleanup(*factory_, std::move(handle_));
}
void Prepare(const HttpRequest& request, IssueRequestOptions options) {
handle_.SetOption(CURLOPT_URL, request.url.c_str());
std::string user_agent = request.user_agent + GetCurlUserAgentSuffix();
handle_.SetOption(CURLOPT_USERAGENT, user_agent.c_str());
curl_slist* head = nullptr;
size_t header_bytes_ = 0;
for (const std::string& h : request.headers) {
head = curl_slist_append(head, h.c_str());
header_bytes_ += h.size();
}
headers_.reset(head);
handle_.SetOption(CURLOPT_HTTPHEADER, headers_.get());
if (request.accept_encoding) {
handle_.SetOption(CURLOPT_ACCEPT_ENCODING, "");
}
if (options.request_timeout > absl::ZeroDuration()) {
auto ms = absl::ToInt64Milliseconds(options.request_timeout);
handle_.SetOption(CURLOPT_TIMEOUT_MS, ms > 0 ? ms : 1);
}
if (options.connect_timeout > absl::ZeroDuration()) {
auto ms = absl::ToInt64Milliseconds(options.connect_timeout);
handle_.SetOption(CURLOPT_CONNECTTIMEOUT_MS, ms > 0 ? ms : 1);
}
payload_ = std::move(options.payload);
payload_remaining_ = payload_.size();
if (payload_remaining_ > 0) {
payload_it_ = payload_.char_begin();
handle_.SetOption(CURLOPT_READDATA, this);
handle_.SetOption(CURLOPT_READFUNCTION,
&CurlRequestState::CurlReadCallback);
handle_.SetOption(CURLOPT_SEEKDATA, this);
handle_.SetOption(CURLOPT_SEEKFUNCTION,
&CurlRequestState::CurlSeekCallback);
}
if (request.method == "GET") {
handle_.SetOption(CURLOPT_PIPEWAIT, 1L);
handle_.SetOption(CURLOPT_HTTPGET, 1L);
} else if (request.method == "HEAD") {
handle_.SetOption(CURLOPT_NOBODY, 1L);
} else if (request.method == "PUT") {
handle_.SetOption(CURLOPT_UPLOAD, 1L);
handle_.SetOption(CURLOPT_PUT, 1L);
handle_.SetOption(CURLOPT_INFILESIZE_LARGE, payload_remaining_);
} else if (request.method == "POST") {
handle_.SetOption(CURLOPT_POST, 1L);
handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
} else if (request.method == "PATCH") {
handle_.SetOption(CURLOPT_UPLOAD, 1L);
handle_.SetOption(CURLOPT_CUSTOMREQUEST, "PATCH");
handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
} else {
handle_.SetOption(CURLOPT_CUSTOMREQUEST, request.method.c_str());
}
switch (options.http_version) {
case IssueRequestOptions::HttpVersion::kHttp1:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
break;
case IssueRequestOptions::HttpVersion::kHttp2:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
break;
case IssueRequestOptions::HttpVersion::kHttp2TLS:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
break;
case IssueRequestOptions::HttpVersion::kHttp2PriorKnowledge:
handle_.SetOption(CURLOPT_HTTP_VERSION,
CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE);
break;
default:
break;
}
http_request_started.Increment();
http_request_bytes.Observe(payload_remaining_);
http_request_header_bytes.Observe(header_bytes_);
}
void SetForbidReuse() {
handle_.SetOption(CURLOPT_FORBID_REUSE, 1);
}
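  // Forwards the HTTP status to the response handler once, as soon as a final
  // (>= 200) response code is available; returns false until then.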
bool MaybeSetStatusAndProcess() {
if (status_set) return true;
auto status_code = handle_.GetResponseCode();
if (status_code < 200) return false;
response_handler_->OnStatus(status_code);
status_set = true;
return true;
}
static size_t CurlHeaderCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
auto data =
std::string_view(static_cast<char const*>(contents), size * nmemb);
if (self->MaybeSetStatusAndProcess()) {
self->response_handler_->OnResponseHeader(data);
}
return data.size();
}
static size_t CurlWriteCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
auto data =
std::string_view(static_cast<char const*>(contents), size * nmemb);
if (self->MaybeSetStatusAndProcess()) {
self->response_payload_size_ += data.size();
self->response_handler_->OnResponseBody(data);
}
return data.size();
}
static size_t CurlReadCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
size_t n = std::min(size * nmemb, self->payload_remaining_);
internal::CopyCordToSpan(self->payload_it_, {static_cast<char*>(contents),
static_cast<ptrdiff_t>(n)});
self->payload_remaining_ -= n;
return n;
}
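  // Rewinds the outgoing payload when libcurl needs to re-send the request
  // body (e.g. on a retry or redirect); only absolute (SEEK_SET) offsets are
  // supported.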
static int CurlSeekCallback(void* userdata, curl_off_t offset, int origin) {
if (origin != SEEK_SET) {
return CURL_SEEKFUNC_CANTSEEK;
}
auto* self = static_cast<CurlRequestState*>(userdata);
if (offset < 0 || offset > self->payload_.size()) {
return CURL_SEEKFUNC_FAIL;
}
self->payload_it_ = self->payload_.char_begin();
absl::Cord::Advance(&self->payload_it_, static_cast<size_t>(offset));
self->payload_remaining_ =
self->payload_.size() - static_cast<size_t>(offset);
return CURL_SEEKFUNC_OK;
}
};
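// Drives all transfers using one curl_multi handle per worker thread: each
// request is queued to the least-loaded thread, whose event loop adds it to
// the multi handle and reaps it on completion.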
class MultiTransportImpl {
public:
MultiTransportImpl(std::shared_ptr<CurlHandleFactory> factory,
size_t nthreads);
~MultiTransportImpl();
void EnqueueRequest(const HttpRequest& request, IssueRequestOptions options,
HttpResponseHandler* response_handler);
void FinishRequest(std::unique_ptr<CurlRequestState> state, CURLcode code);
private:
struct ThreadData {
std::atomic<int64_t> count = 0;
CurlMulti multi;
absl::Mutex mutex;
CircularQueue<std::unique_ptr<CurlRequestState>> pending{16};
bool done = false;
};
void Run(ThreadData& thread_data);
void MaybeAddPendingTransfers(ThreadData& thread_data);
void RemoveCompletedTransfers(ThreadData& thread_data);
std::shared_ptr<CurlHandleFactory> factory_;
std::atomic<bool> done_{false};
std::unique_ptr<ThreadData[]> thread_data_;
std::vector<internal::Thread> threads_;
};
MultiTransportImpl::MultiTransportImpl(
std::shared_ptr<CurlHandleFactory> factory, size_t nthreads)
: factory_(std::move(factory)) {
assert(factory_);
threads_.reserve(nthreads);
thread_data_ = std::make_unique<ThreadData[]>(nthreads);
for (size_t i = 0; i < nthreads; ++i) {
thread_data_[i].multi = factory_->CreateMultiHandle();
threads_.push_back(
internal::Thread({"curl_multi_thread"},
[this, index = i] { Run(thread_data_[index]); }));
}
}
MultiTransportImpl::~MultiTransportImpl() {
done_ = true;
for (size_t i = 0; i < threads_.size(); ++i) {
auto& thread_data = thread_data_[i];
absl::MutexLock l(&thread_data.mutex);
thread_data.done = true;
curl_multi_wakeup(thread_data.multi.get());
}
for (auto& thread : threads_) {
thread.Join();
}
for (size_t i = 0; i < threads_.size(); ++i) {
factory_->CleanupMultiHandle(std::move(thread_data_[i].multi));
}
}
void MultiTransportImpl::EnqueueRequest(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) {
if (done_.load()) {
response_handler->OnFailure(
absl::InternalError("MultiTransportImpl is shutting down"));
return;
}
auto state = std::make_unique<CurlRequestState>(factory_);
state->response_handler_ = response_handler;
state->Prepare(request, std::move(options));
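  // Dispatch to the worker thread with the fewest outstanding requests.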
size_t selected_index = 0;
for (size_t i = 1; i < threads_.size(); ++i) {
if (thread_data_[i].count < thread_data_[selected_index].count) {
selected_index = i;
}
}
auto& selected = thread_data_[selected_index];
absl::MutexLock l(&selected.mutex);
selected.pending.push_back(std::move(state));
selected.count++;
curl_multi_wakeup(selected.multi.get());
}
void MultiTransportImpl::FinishRequest(std::unique_ptr<CurlRequestState> state,
CURLcode code) {
if (code == CURLE_HTTP2) {
ABSL_LOG(WARNING) << "CURLE_HTTP2 " << state->error_buffer_;
state->SetForbidReuse();
}
http_request_completed.Increment();
http_response_bytes.Observe(state->response_payload_size_);
{
curl_off_t first_byte_us = 0;
state->handle_.GetInfo(CURLINFO_STARTTRANSFER_TIME_T, &first_byte_us);
http_first_byte_latency_us.Observe(first_byte_us);
}
{
curl_off_t total_time_us = 0;
state->handle_.GetInfo(CURLINFO_TOTAL_TIME_T, &total_time_us);
http_total_time_ms.Observe(total_time_us / 1000);
}
if (code != CURLE_OK) {
state->response_handler_->OnFailure(
CurlCodeToStatus(code, state->error_buffer_));
return;
}
http_response_codes.Increment(state->handle_.GetResponseCode());
assert(state->status_set);
state->response_handler_->OnComplete();
}
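// Per-thread event loop: adds newly queued transfers, waits when idle, polls
// libcurl for activity, and harvests completed transfers until shutdown.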
void MultiTransportImpl::Run(ThreadData& thread_data) {
for (;;) {
MaybeAddPendingTransfers(thread_data);
if (thread_data.count == 0) {
absl::MutexLock l(&thread_data.mutex);
if (thread_data.done) break;
thread_data.mutex.Await(absl::Condition(
+[](ThreadData* td) { return !td->pending.empty() || td->done; },
&thread_data));
if (thread_data.done) break;
continue;
}
const int timeout_ms = std::numeric_limits<int>::max();
int numfds = 0;
errno = 0;
auto start_poll = absl::Now();
CURLMcode mcode = curl_multi_poll(thread_data.multi.get(), nullptr, 0,
timeout_ms, &numfds);
if (mcode != CURLM_OK) {
ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_poll");
}
http_poll_time_ns.Observe(
absl::ToInt64Nanoseconds(absl::Now() - start_poll));
{
int running_handles = 0;
CURLMcode mcode;
do {
mcode = curl_multi_perform(thread_data.multi.get(), &running_handles);
http_active.Set(running_handles);
} while (mcode == CURLM_CALL_MULTI_PERFORM);
if (mcode != CURLM_OK) {
ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_perform");
}
}
RemoveCompletedTransfers(thread_data);
}
assert(thread_data.count == 0);
}
void MultiTransportImpl::MaybeAddPendingTransfers(ThreadData& thread_data) {
absl::MutexLock l(&thread_data.mutex);
while (!thread_data.pending.empty()) {
std::unique_ptr<CurlRequestState> state =
std::move(thread_data.pending.front());
thread_data.pending.pop_front();
assert(state != nullptr);
state->handle_.SetOption(CURLOPT_PRIVATE, state.get());
CURL* e = state->handle_.get();
CURLMcode mcode = curl_multi_add_handle(thread_data.multi.get(), e);
if (mcode == CURLM_OK) {
state.release();
} else {
thread_data.count--;
state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
state->response_handler_->OnFailure(
CurlMCodeToStatus(mcode, "in curl_multi_add_handle"));
}
};
}
void MultiTransportImpl::RemoveCompletedTransfers(ThreadData& thread_data) {
CURLMsg* m = nullptr;
do {
int messages_in_queue;
m = curl_multi_info_read(thread_data.multi.get(), &messages_in_queue);
if (m && m->msg == CURLMSG_DONE) {
CURLcode result = m->data.result;
CURL* e = m->easy_handle;
curl_multi_remove_handle(thread_data.multi.get(), e);
thread_data.count--;
CurlRequestState* pvt = nullptr;
curl_easy_getinfo(e, CURLINFO_PRIVATE, &pvt);
assert(pvt);
std::unique_ptr<CurlRequestState> state(pvt);
state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
FinishRequest(std::move(state), result);
}
} while (m != nullptr);
}
}
class CurlTransport::Impl : public MultiTransportImpl {
public:
using MultiTransportImpl::MultiTransportImpl;
};
CurlTransport::CurlTransport(std::shared_ptr<CurlHandleFactory> factory)
: impl_(std::make_unique<Impl>(std::move(factory),
GetHttpThreads())) {}
CurlTransport::~CurlTransport() = default;
void CurlTransport::IssueRequestWithHandler(
const HttpRequest& request, IssueRequestOptions options,
HttpResponseHandler* response_handler) {
assert(impl_);
impl_->EnqueueRequest(request, std::move(options), response_handler);
}
namespace {
struct GlobalTransport {
std::shared_ptr<HttpTransport> transport_;
std::shared_ptr<HttpTransport> Get() {
if (!transport_) {
transport_ =
std::make_shared<CurlTransport>(GetDefaultCurlHandleFactory());
}
return transport_;
}
void Set(std::shared_ptr<HttpTransport> transport) {
transport_ = std::move(transport);
}
};
ABSL_CONST_INIT absl::Mutex global_mu(absl::kConstInit);
static GlobalTransport& GetGlobalTransport() {
static auto* g = new GlobalTransport();
return *g;
}
}
std::shared_ptr<HttpTransport> GetDefaultHttpTransport() {
absl::MutexLock l(&global_mu);
return GetGlobalTransport().Get();
}
void SetDefaultHttpTransport(std::shared_ptr<HttpTransport> t) {
absl::MutexLock l(&global_mu);
return GetGlobalTransport().Set(std::move(t));
}
}
} | #ifdef _WIN32
#undef UNICODE
#define WIN32_LEAN_AND_MEAN
#endif
#include "tensorstore/internal/http/curl_transport.h"
#include <optional>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/thread/thread.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::AcceptNonBlocking;
using ::tensorstore::transport_test_utils::AssertSend;
using ::tensorstore::transport_test_utils::CloseSocket;
using ::tensorstore::transport_test_utils::CreateBoundSocket;
using ::tensorstore::transport_test_utils::FormatSocketAddress;
using ::tensorstore::transport_test_utils::ReceiveAvailable;
using ::tensorstore::transport_test_utils::socket_t;
using ::testing::HasSubstr;
namespace {
class CurlTransportTest : public ::testing::Test {
public:
};
TEST_F(CurlTransportTest, Http1) {
auto transport = ::tensorstore::internal_http::GetDefaultHttpTransport();
auto socket = CreateBoundSocket();
ABSL_CHECK(socket.has_value());
auto hostport = FormatSocketAddress(*socket);
ABSL_CHECK(!hostport.empty());
static constexpr char kResponse[] =
"HTTP/1.1 200 OK\r\n"
"Content-Type: text/html\r\n"
"\r\n"
"<html>\n<body>\n<h1>Hello, World!</h1>\n</body>\n</html>\n";
std::string initial_request;
tensorstore::internal::Thread serve_thread({"serve_thread"}, [&] {
auto client_fd = AcceptNonBlocking(*socket);
ABSL_CHECK(client_fd.has_value());
initial_request = ReceiveAvailable(*client_fd);
AssertSend(*client_fd, kResponse);
CloseSocket(*client_fd);
});
auto response = transport->IssueRequest(
      HttpRequestBuilder("POST", absl::StrCat("http://", hostport, "/"))
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest(),
IssueRequestOptions(absl::Cord("Hello")));
ABSL_LOG(INFO) << response.status();
ABSL_LOG(INFO) << "Wait on server";
serve_thread.Join();
CloseSocket(*socket);
EXPECT_THAT(initial_request, HasSubstr("POST /?name=dragon&age=1234"));
EXPECT_THAT(initial_request,
HasSubstr(absl::StrCat("Host: ", hostport, "\r\n")));
  EXPECT_THAT(initial_request, HasSubstr("Accept: */*\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("X-foo: bar\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("Content-Length: 5"));
  EXPECT_THAT(
      initial_request,
      HasSubstr("Content-Type: application/x-www-form-urlencoded\r\n"));
  EXPECT_THAT(initial_request, HasSubstr("Hello"));
}
}
} |
687 | cpp | google/tensorstore | verbose_flag | tensorstore/internal/log/verbose_flag.cc | tensorstore/internal/log/verbose_flag_test.cc | #ifndef TENSORSTORE_INTERNAL_LOG_VERBOSE_FLAG_H_
#define TENSORSTORE_INTERNAL_LOG_VERBOSE_FLAG_H_
#include <stddef.h>
#include <atomic>
#include <limits>
#include <string_view>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
namespace tensorstore {
namespace internal_log {
void UpdateVerboseLogging(std::string_view input, bool overwrite);
class VerboseFlag {
public:
constexpr static int kValueUninitialized = std::numeric_limits<int>::max();
explicit constexpr VerboseFlag(const char* name)
: value_(kValueUninitialized), name_(name), next_(nullptr) {}
VerboseFlag(const VerboseFlag&) = delete;
VerboseFlag& operator=(const VerboseFlag&) = delete;
ABSL_ATTRIBUTE_ALWAYS_INLINE
bool Level(int level) {
int v = value_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_TRUE(level > v)) {
return false;
}
return VerboseFlagSlowPath(this, v, level);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE
operator bool() {
int v = value_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_TRUE(0 > v)) {
return false;
}
return VerboseFlagSlowPath(this, v, 0);
}
private:
static bool VerboseFlagSlowPath(VerboseFlag* flag, int old_v, int level);
static int RegisterVerboseFlag(VerboseFlag* flag);
std::atomic<int> value_;
const char* const name_;
VerboseFlag* next_;
friend void UpdateVerboseLogging(std::string_view, bool);
};
}
}
#endif
#include "tensorstore/internal/log/verbose_flag.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/env.h"
ABSL_FLAG(std::string, tensorstore_verbose_logging, {},
"comma-separated list of tensorstore verbose logging flags")
.OnUpdate([]() {
if (!absl::GetFlag(FLAGS_tensorstore_verbose_logging).empty()) {
tensorstore::internal_log::UpdateVerboseLogging(
absl::GetFlag(FLAGS_tensorstore_verbose_logging), true);
}
});
namespace tensorstore {
namespace internal_log {
namespace {
ABSL_CONST_INIT absl::Mutex g_mutex(absl::kConstInit);
ABSL_CONST_INIT VerboseFlag* g_list_head ABSL_GUARDED_BY(g_mutex) = nullptr;
struct LoggingLevelConfig {
int default_level = -1;
absl::flat_hash_map<std::string, int> levels;
};
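// Parses a comma-separated list of "name" or "name=level" entries; a bare name
// means level 0, levels are clamped to [-1, 1000], and the special name "all"
// sets the default level applied to unnamed flags.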
void UpdateLoggingLevelConfig(std::string_view input,
LoggingLevelConfig& config) {
auto& levels = config.levels;
for (std::string_view flag : absl::StrSplit(input, ',', absl::SkipEmpty())) {
const size_t eq = flag.rfind('=');
if (eq == flag.npos) {
levels.insert_or_assign(std::string(flag), 0);
continue;
}
if (eq == 0) continue;
int level;
if (!absl::SimpleAtoi(flag.substr(eq + 1), &level)) continue;
if (level < -1) {
level = -1;
} else if (level > 1000) {
level = 1000;
}
levels.insert_or_assign(std::string(flag.substr(0, eq)), level);
}
config.default_level = -1;
if (auto it = levels.find("all"); it != levels.end()) {
config.default_level = it->second;
}
}
LoggingLevelConfig& GetLoggingLevelConfig()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(g_mutex) {
static absl::NoDestructor<LoggingLevelConfig> flags{[] {
LoggingLevelConfig config;
if (auto env = internal::GetEnv("TENSORSTORE_VERBOSE_LOGGING"); env) {
UpdateLoggingLevelConfig(*env, config);
}
return config;
}()};
return *flags;
}
}
void UpdateVerboseLogging(std::string_view input, bool overwrite)
ABSL_LOCKS_EXCLUDED(g_mutex) {
ABSL_LOG(INFO) << "--tensorstore_verbose_logging=" << input;
LoggingLevelConfig config;
UpdateLoggingLevelConfig(input, config);
absl::MutexLock lock(&g_mutex);
VerboseFlag* slist = g_list_head;
LoggingLevelConfig& global_config = GetLoggingLevelConfig();
std::swap(global_config.levels, config.levels);
std::swap(global_config.default_level, config.default_level);
if (!overwrite) {
if (global_config.levels.count("all")) {
global_config.default_level = config.default_level;
}
global_config.levels.merge(config.levels);
}
std::string_view last;
int last_level = 0;
while (slist != nullptr) {
std::string_view current(slist->name_);
if (current != last) {
last = current;
auto it = global_config.levels.find(current);
if (it != global_config.levels.end()) {
last_level = it->second;
} else {
last_level = global_config.default_level;
}
}
slist->value_.store(last_level, std::memory_order_seq_cst);
slist = slist->next_;
}
}
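// Registers a flag on first use: resolves its level from the current config
// and links it into the global flag list so later updates can refresh it.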
int VerboseFlag::RegisterVerboseFlag(VerboseFlag* flag) {
std::string_view flag_name(flag->name_);
absl::MutexLock lock(&g_mutex);
int old_v = flag->value_.load(std::memory_order_relaxed);
if (old_v == kValueUninitialized) {
const auto& global_config = GetLoggingLevelConfig();
if (auto it = global_config.levels.find(flag_name);
it != global_config.levels.end()) {
old_v = it->second;
} else {
old_v = global_config.default_level;
}
flag->value_.store(old_v, std::memory_order_relaxed);
flag->next_ = std::exchange(g_list_head, flag);
}
return old_v;
}
bool VerboseFlag::VerboseFlagSlowPath(VerboseFlag* flag, int old_v, int level) {
if (ABSL_PREDICT_TRUE(old_v != kValueUninitialized)) {
return level >= 0;
}
old_v = RegisterVerboseFlag(flag);
return ABSL_PREDICT_FALSE(old_v >= level);
}
static_assert(std::is_trivially_destructible<VerboseFlag>::value,
"VerboseFlag must be trivially destructible");
}
} | #include "tensorstore/internal/log/verbose_flag.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/attributes.h"
using ::tensorstore::internal_log::UpdateVerboseLogging;
using ::tensorstore::internal_log::VerboseFlag;
#define TENSORSTORE_VERBOSE_FLAG(X) \
[]() -> ::tensorstore::internal_log::VerboseFlag& { \
ABSL_CONST_INIT static ::tensorstore::internal_log::VerboseFlag flag(X); \
return flag; \
}()
namespace {
TEST(VerboseFlag, Basic) {
UpdateVerboseLogging("a=2", true);
ABSL_CONST_INIT static VerboseFlag a1("a");
auto& b = TENSORSTORE_VERBOSE_FLAG("b");
EXPECT_THAT((bool)a1, true);
EXPECT_THAT(a1.Level(0), true);
EXPECT_THAT(a1.Level(1), true);
EXPECT_THAT(a1.Level(2), true);
EXPECT_THAT(a1.Level(3), false);
EXPECT_THAT((bool)b, false);
EXPECT_THAT(b.Level(0), false);
UpdateVerboseLogging("b,a=-1", false);
EXPECT_THAT((bool)a1, false);
EXPECT_THAT(a1.Level(0), false);
EXPECT_THAT(a1.Level(1), false);
EXPECT_THAT((bool)b, true);
EXPECT_THAT(b.Level(0), true);
EXPECT_THAT(b.Level(1), false);
}
} |
688 | cpp | google/tensorstore | value_as | tensorstore/internal/json/value_as.cc | tensorstore/internal/json/value_as_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON__VALUE_AS_H_
#define TENSORSTORE_INTERNAL_JSON__VALUE_AS_H_
#include <stdint.h>
#include <cstddef>
#include <limits>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
namespace internal_json {
absl::Status ExpectedError(const ::nlohmann::json& j,
std::string_view type_name);
absl::Status ValidationError(const ::nlohmann::json& j,
std::string_view type_name);
inline constexpr const char* GetTypeName(internal::type_identity<int64_t>) {
return "64-bit signed integer";
}
inline constexpr const char* GetTypeName(internal::type_identity<uint64_t>) {
return "64-bit unsigned integer";
}
inline constexpr const char* GetTypeName(internal::type_identity<int32_t>) {
return "32-bit signed integer";
}
inline constexpr const char* GetTypeName(internal::type_identity<uint32_t>) {
return "32-bit unsigned integer";
}
inline constexpr const char* GetTypeName(internal::type_identity<double>) {
return "64-bit floating-point number";
}
inline constexpr const char* GetTypeName(internal::type_identity<std::string>) {
return "string";
}
inline constexpr const char* GetTypeName(internal::type_identity<bool>) {
return "boolean";
}
inline constexpr const char* GetTypeName(
internal::type_identity<std::nullptr_t>) {
return "null";
}
inline constexpr const char* GetTypeName(...) { return nullptr; }
template <typename T>
struct JsonRequireIntegerImpl {
static absl::Status Execute(const ::nlohmann::json& json, T* result,
bool strict, T min_value, T max_value);
};
template <typename T>
std::optional<T> JsonValueAs(const ::nlohmann::json& j, bool strict = false) {
static_assert(!std::is_same_v<T, T>, "Target type not supported.");
}
template <>
std::optional<std::nullptr_t> JsonValueAs<std::nullptr_t>(
const ::nlohmann::json& j, bool strict);
template <>
std::optional<bool> JsonValueAs<bool>(const ::nlohmann::json& j, bool strict);
template <>
std::optional<int64_t> JsonValueAs<int64_t>(const ::nlohmann::json& j,
bool strict);
template <>
std::optional<uint64_t> JsonValueAs<uint64_t>(const ::nlohmann::json& j,
bool strict);
template <>
std::optional<double> JsonValueAs<double>(const ::nlohmann::json& j,
bool strict);
template <>
std::optional<std::string> JsonValueAs<std::string>(const ::nlohmann::json& j,
bool strict);
template <typename T, typename ValidateFn>
std::enable_if_t<!std::is_same_v<ValidateFn, bool>, absl::Status>
JsonRequireValueAs(const ::nlohmann::json& j, T* result, ValidateFn is_valid,
bool strict = false) {
auto value = JsonValueAs<T>(j, strict);
if (!value) {
return internal_json::ExpectedError(
j, internal_json::GetTypeName(internal::type_identity<T>{}));
}
if (!is_valid(*value)) {
return internal_json::ValidationError(
j, internal_json::GetTypeName(internal::type_identity<T>{}));
}
if (result != nullptr) {
*result = std::move(*value);
}
return absl::OkStatus();
}
template <typename T>
absl::Status JsonRequireValueAs(const ::nlohmann::json& j, T* result,
bool strict = false) {
return JsonRequireValueAs(
j, result, [](const T&) { return true; }, strict);
}
template <typename T>
absl::Status JsonRequireInteger(
const ::nlohmann::json& json, T* result, bool strict = false,
internal::type_identity_t<T> min_value = std::numeric_limits<T>::min(),
internal::type_identity_t<T> max_value = std::numeric_limits<T>::max()) {
static_assert(std::numeric_limits<T>::is_integer,
"T must be an integer type.");
using U =
std::conditional_t<std::numeric_limits<T>::is_signed, int64_t, uint64_t>;
U temp;
auto status = internal_json::JsonRequireIntegerImpl<U>::Execute(
json, &temp, strict, min_value, max_value);
if (status.ok()) *result = temp;
return status;
}
}
}
#endif
#include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <cmath>
#include <cstddef>
#include <limits>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json {
absl::Status ExpectedError(const ::nlohmann::json& j,
std::string_view type_name) {
if (j.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected ", type_name, ", but member is missing"));
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", type_name, ", but received: ", j.dump()));
}
absl::Status ValidationError(const ::nlohmann::json& j,
std::string_view type_name) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Validation of ", type_name, " failed, received: ", j.dump()));
}
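// Parses an integer of type T from the JSON value and verifies that it lies in
// [min_value, max_value], returning a descriptive error otherwise.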
template <typename T>
absl::Status JsonRequireIntegerImpl<T>::Execute(const ::nlohmann::json& json,
T* result, bool strict,
T min_value, T max_value) {
if (auto x = JsonValueAs<T>(json, strict)) {
if (*x >= min_value && *x <= max_value) {
*result = *x;
return absl::OkStatus();
}
}
constexpr const char* kTypeName = []() {
if constexpr (sizeof(T) == 4 && std::is_signed_v<T>)
return "32-bit signed integer";
if constexpr (sizeof(T) == 4 && std::is_unsigned_v<T>)
return "32-bit unsigned integer";
if constexpr (sizeof(T) == 8 && std::is_signed_v<T>)
return "64-bit signed integer";
if constexpr (sizeof(T) == 8 && std::is_unsigned_v<T>)
return "64-bit unsigned integer";
return GetTypeName(internal::type_identity_t<T>{});
}();
if constexpr (kTypeName != nullptr) {
if (min_value == std::numeric_limits<T>::min() &&
max_value == std::numeric_limits<T>::max()) {
return internal_json::ExpectedError(json, kTypeName);
}
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected integer in the range [", min_value, ", ",
max_value, "], but received: ", json.dump()));
}
template struct JsonRequireIntegerImpl<int64_t>;
template struct JsonRequireIntegerImpl<uint64_t>;
template <>
std::optional<std::nullptr_t> JsonValueAs<std::nullptr_t>(
const ::nlohmann::json& j, bool strict) {
if (j.is_null()) {
return nullptr;
}
return std::nullopt;
}
template <>
std::optional<bool> JsonValueAs<bool>(const ::nlohmann::json& j, bool strict) {
if (j.is_boolean()) {
return j.get<bool>();
}
if (!strict && j.is_string()) {
const auto& str = j.get_ref<std::string const&>();
if (str == "true") return true;
if (str == "false") return false;
}
return std::nullopt;
}
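// Converts unsigned, signed, or integral floating-point JSON numbers to
// int64_t; in non-strict mode a decimal string is also accepted.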
template <>
std::optional<int64_t> JsonValueAs<int64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
auto x = j.get<uint64_t>();
if (x <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
return static_cast<int64_t>(x);
}
} else if (j.is_number_integer()) {
return j.get<int64_t>();
} else if (j.is_number_float()) {
auto x = j.get<double>();
if (x >= -9223372036854775808.0 &&
x < 9223372036854775808.0 && x == std::floor(x)) {
return static_cast<int64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
int64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
template <>
std::optional<uint64_t> JsonValueAs<uint64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
return j.get<uint64_t>();
} else if (j.is_number_integer()) {
int64_t x = j.get<int64_t>();
if (x >= 0) {
return static_cast<uint64_t>(x);
}
} else if (j.is_number_float()) {
double x = j.get<double>();
if (x >= 0.0 && x < 18446744073709551616.0 &&
x == std::floor(x)) {
return static_cast<uint64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
uint64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
template <>
std::optional<double> JsonValueAs<double>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number()) {
return j.get<double>();
}
if (!strict && j.is_string()) {
double result = 0;
if (absl::SimpleAtod(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
return std::nullopt;
}
template <>
std::optional<std::string> JsonValueAs<std::string>(const ::nlohmann::json& j,
bool strict) {
if (j.is_string()) {
return j.get<std::string>();
}
return std::nullopt;
}
}
} | #include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json::JsonRequireInteger;
using ::tensorstore::internal_json::JsonRequireValueAs;
using ::tensorstore::internal_json::JsonValueAs;
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json::object_t& j,
const char* member) {
auto it = j.find(member);
if (it == j.end()) {
return std::nullopt;
}
return JsonValueAs<T>(it->second, kStrict);
}
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json& j, const char* member) {
if (const auto* obj = j.get_ptr<const ::nlohmann::json::object_t*>()) {
return JsonMemberT<T, kStrict>(*obj, member);
}
return std::nullopt;
}
TEST(JsonTest, Meta) {
auto JsonRequireString = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<std::string>(json, member);
return v.has_value() && !v->empty();
};
auto JsonRequireInt = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<int64_t, false>(json, member);
return v.has_value();
};
auto meta = ::nlohmann::json::meta();
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(JsonRequireString(meta, "name"));
EXPECT_TRUE(JsonRequireString(meta, "url"));
EXPECT_TRUE(JsonRequireString(meta, "platform"));
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(meta.find("compiler") != meta.end());
auto compiler = meta["compiler"];
EXPECT_TRUE(JsonRequireString(compiler, "c++"));
EXPECT_FALSE(JsonRequireString(meta, "version"));
auto version = meta["version"];
EXPECT_TRUE(JsonRequireInt(version, "major"));
}
::nlohmann::json GetDefaultJSON() {
return ::nlohmann::json{
{"bool_true", true}, {"bool_false", false}, {"str_bool", "true"},
{"signed", 456}, {"neg_signed", -567}, {"unsigned", 565u},
{"float", 456.789}, {"neg_float", -678.91}, {"int_float", 122.0},
{"str", "abc"}, {"str_number", "789"}, {"str_float", "123.40"},
{"nil", nullptr}, {"empty_obj", {}}, {"obj", {"a", 1}},
};
}
std::set<std::string> GetKeys() {
return std::set<std::string>{{
"bool_true",
"bool_false",
"str_bool",
"signed",
"neg_signed",
"unsigned",
"float",
"neg_float",
"int_float",
"str",
"abc",
"str_number",
"str_float",
"nil",
"empty_obj",
"obj",
"missing",
}};
}
TEST(JsonTest, JsonParseBool) {
auto keys = GetKeys();
auto JsonParseBool = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<bool, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseBool(result, "bool_true"));
EXPECT_EQ(true, *JsonParseBool(result, "bool_true"));
ASSERT_TRUE(JsonParseBool(result, "bool_false"));
EXPECT_EQ(false, *JsonParseBool(result, "bool_false"));
ASSERT_TRUE(JsonParseBool(result, "str_bool"));
EXPECT_EQ(true, *JsonParseBool(result, "str_bool"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseBool(result, x.c_str())) << x;
}
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("a")));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json("false")));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json("true")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("true"), kStrict));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json(true), kStrict));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json(false), kStrict));
}
TEST(JsonValueAsTest, Int64FromUint64) {
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0x8fffffffffffffffu)));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0xffffffffffffffffu)));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu)));
const bool kStrict = true;
EXPECT_EQ(
0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu), kStrict));
}
TEST(JsonValueAsTest, Int64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<int64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<int64_t>(::nlohmann::json(1.0)));
EXPECT_EQ(
std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(9223372036854775808.0 )));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(-9223372036854777856.0)));
EXPECT_EQ(9223372036854774784,
JsonValueAs<int64_t>(::nlohmann::json(9223372036854774784.0)));
EXPECT_EQ(
-0x8000000000000000,
JsonValueAs<int64_t>(::nlohmann::json(-9223372036854775808.0 )));
}
TEST(JsonValueAsTest, Int64FromString) {
EXPECT_EQ(-1, JsonValueAs<int64_t>(::nlohmann::json("-1")));
EXPECT_EQ(-0x8000000000000000,
JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775808")));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json("9223372036854775807")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("9223372036854775808")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775809")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json("-1"), kStrict));
}
TEST(JsonValueAsTest, Uint64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<uint64_t>(::nlohmann::json(1.0)));
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(
18446744073709551616.0 )));
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(-1.0)));
EXPECT_EQ(18446744073709549568u,
JsonValueAs<uint64_t>(::nlohmann::json(18446744073709549568.0)));
}
TEST(JsonValueAsTest, Uint64FromString) {
EXPECT_EQ(0xffffffffffffffffu,
JsonValueAs<uint64_t>(::nlohmann::json("18446744073709551615")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("-1")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<uint64_t>(::nlohmann::json("1"), kStrict));
}
TEST(JsonTest, JsonParseInt) {
auto keys = GetKeys();
auto JsonParseInt = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<int64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseInt(result, "signed"));
EXPECT_EQ(456, *JsonParseInt(result, "signed"));
ASSERT_TRUE(JsonParseInt(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseInt(result, "neg_signed"));
ASSERT_TRUE(JsonParseInt(result, "unsigned"));
EXPECT_EQ(565, *JsonParseInt(result, "unsigned"));
ASSERT_TRUE(JsonParseInt(result, "int_float"));
EXPECT_EQ(122, *JsonParseInt(result, "int_float"));
ASSERT_TRUE(JsonParseInt(result, "str_number"));
EXPECT_EQ(789, *JsonParseInt(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseInt(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseUnsigned) {
auto keys = GetKeys();
auto JsonParseUnsigned = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<uint64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseUnsigned(result, "signed"));
EXPECT_EQ(456, *JsonParseUnsigned(result, "signed"));
ASSERT_TRUE(JsonParseUnsigned(result, "unsigned"));
EXPECT_EQ(565, *JsonParseUnsigned(result, "unsigned"));
ASSERT_TRUE(JsonParseUnsigned(result, "int_float"));
EXPECT_EQ(122, *JsonParseUnsigned(result, "int_float"));
ASSERT_TRUE(JsonParseUnsigned(result, "str_number"));
EXPECT_EQ(789, *JsonParseUnsigned(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseUnsigned(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseDouble) {
auto keys = GetKeys();
auto JsonParseDouble = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<double, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseDouble(result, "signed"));
EXPECT_EQ(456, *JsonParseDouble(result, "signed"));
ASSERT_TRUE(JsonParseDouble(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseDouble(result, "neg_signed"));
ASSERT_TRUE(JsonParseDouble(result, "unsigned"));
EXPECT_EQ(565, *JsonParseDouble(result, "unsigned"));
ASSERT_TRUE(JsonParseDouble(result, "float"));
EXPECT_EQ(456.789, *JsonParseDouble(result, "float"));
ASSERT_TRUE(JsonParseDouble(result, "neg_float"));
EXPECT_EQ(-678.91, *JsonParseDouble(result, "neg_float"));
ASSERT_TRUE(JsonParseDouble(result, "int_float"));
EXPECT_EQ(122, *JsonParseDouble(result, "int_float"));
ASSERT_TRUE(JsonParseDouble(result, "str_number"));
EXPECT_EQ(789, *JsonParseDouble(result, "str_number"));
ASSERT_TRUE(JsonParseDouble(result, "str_float"));
EXPECT_EQ(123.4, *JsonParseDouble(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseDouble(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseString) {
auto keys = GetKeys();
auto JsonParseString = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<std::string>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseString(result, "str_bool"));
EXPECT_EQ("true", *JsonParseString(result, "str_bool"));
ASSERT_TRUE(JsonParseString(result, "str"));
EXPECT_EQ("abc", *JsonParseString(result, "str"));
ASSERT_TRUE(JsonParseString(result, "str_number"));
EXPECT_EQ("789", *JsonParseString(result, "str_number"));
ASSERT_TRUE(JsonParseString(result, "str_float"));
EXPECT_EQ("123.40", *JsonParseString(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseString(result, x.c_str())) << x;
}
}
TEST(JsonRequireValueAs, Success) {
{
bool v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(true), &v, true).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, false).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, [](bool) {
return true;
}).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(
JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr, true).ok());
}
{
int64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-3), &v, true).ok());
EXPECT_EQ(-3, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-4.0), &v, false).ok());
EXPECT_EQ(-4, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, false).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, [](int64_t) {
return true;
}).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(
JsonRequireValueAs<int64_t>(::nlohmann::json(-3), nullptr, true).ok());
}
{
uint64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(6), &v, true).ok());
EXPECT_EQ(6, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(7.0), &v, false).ok());
EXPECT_EQ(7, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, false).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, [](uint64_t) {
return true;
}).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3), nullptr, true).ok());
}
{
double v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(0.5), &v, true).ok());
EXPECT_EQ(0.5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, false).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, [](double) {
return true;
}).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(
JsonRequireValueAs<double>(::nlohmann::json(3.0), nullptr, true).ok());
}
{
std::string v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("x"), &v, false).ok());
EXPECT_EQ("x", v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("y"), &v, [](std::string) {
return true;
}).ok());
EXPECT_EQ("y", v);
EXPECT_TRUE(
JsonRequireValueAs<std::string>(::nlohmann::json("z"), nullptr, true)
.ok());
}
}
TEST(JsonRequireValueAs, Failure) {
{
bool v;
EXPECT_THAT(JsonRequireValueAs(::nlohmann::json("true"), &v, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
}
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr,
[](bool) { return false; }),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Validation of boolean failed, received: true"));
EXPECT_THAT(
JsonRequireValueAs<int64_t>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit signed integer, but received: \"true\""));
EXPECT_THAT(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3.5), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit unsigned integer, but received: 3.5"));
EXPECT_THAT(
JsonRequireValueAs<std::string>(::nlohmann::json(true), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: true"));
}
TEST(JsonRequireIntegerTest, Success) {
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-5), &result_int32,
true, -7, -3));
EXPECT_EQ(-5, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-7), &result_int32,
true, -7, -3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json("-7"), &result_int32,
false, -7, -3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-3), &result_int32,
true, -7, -3));
EXPECT_EQ(-3, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_uint32,
true, 2, 7));
EXPECT_EQ(5u, result_uint32);
}
{
std::int16_t result_int16 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_int16,
true, 2, 7));
EXPECT_EQ(5, result_int16);
}
}
TEST(JsonRequireIntegerTest, Failure) {
{
std::int32_t result_int32 = 42;
EXPECT_THAT(
JsonRequireInteger(::nlohmann::json(-2), &result_int32, true,
-7, -3),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but received: -2"));
EXPECT_EQ(42, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_THAT(JsonRequireInteger(::nlohmann::json(true), &result_int32,
true, -7, -3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but "
"received: true"));
EXPECT_EQ(42, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_THAT(
JsonRequireInteger(::nlohmann::json(11), &result_uint32,
true, 5, 10),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[5, 10\\], but received: 11"));
EXPECT_EQ(42u, result_uint32);
}
}
} |
689 | cpp | google/tensorstore | pprint_python | tensorstore/internal/json/pprint_python.cc | tensorstore/internal/json/pprint_python_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON__PPRINT_PYTHON_H_
#define TENSORSTORE_INTERNAL_JSON__PPRINT_PYTHON_H_
#include <string>
#include <string_view>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_python {
struct PrettyPrintJsonAsPythonOptions {
int indent = 2;
int width = 80;
int cur_line_indent = 0;
int subsequent_indent = 0;
};
void PrettyPrintJsonAsPython(
std::string* out, const ::nlohmann::json& j,
const PrettyPrintJsonAsPythonOptions& options = {});
std::string PrettyPrintJsonAsPython(
const ::nlohmann::json& j,
const PrettyPrintJsonAsPythonOptions& options = {});
std::string PrettyPrintJsonAsPythonRepr(
const Result<::nlohmann::json>& j, std::string_view prefix,
std::string_view suffix,
const PrettyPrintJsonAsPythonOptions& options = {});
}
}
#endif
#include "tensorstore/internal/json/pprint_python.h"
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/strings/escaping.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_python {
namespace {
void FormatStringForPython(std::string* out, std::string_view s) {
*out += '\'';
*out += absl::CHexEscape(s);
*out += '\'';
}
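// Appends a single-line Python literal for the JSON value, mapping
// null/true/false to None/True/False and binary values to b'...' literals.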
void FormatAsSingleLineForPython(std::string* out, const ::nlohmann::json& j) {
switch (j.type()) {
case ::nlohmann::json::value_t::object: {
*out += "{";
bool first = true;
for (const auto& [key, value] :
j.get_ref<const ::nlohmann::json::object_t&>()) {
if (!first) {
*out += ", ";
} else {
first = false;
}
FormatStringForPython(out, key);
*out += ": ";
FormatAsSingleLineForPython(out, value);
}
*out += "}";
break;
}
case ::nlohmann::json::value_t::array: {
*out += '[';
bool first = true;
for (const auto& x : j.get_ref<const ::nlohmann::json::array_t&>()) {
if (!first) {
*out += ", ";
} else {
first = false;
}
FormatAsSingleLineForPython(out, x);
}
*out += ']';
break;
}
case ::nlohmann::json::value_t::string: {
FormatStringForPython(out, j.get_ref<const std::string&>());
break;
}
case ::nlohmann::json::value_t::binary: {
auto& s = j.get_ref<const ::nlohmann::json::binary_t&>();
*out += 'b';
FormatStringForPython(
out,
std::string_view(reinterpret_cast<const char*>(s.data()), s.size()));
break;
}
case ::nlohmann::json::value_t::boolean: {
*out += (j.get_ref<const bool&>() ? "True" : "False");
break;
}
case ::nlohmann::json::value_t::null: {
*out += "None";
break;
}
default:
*out += j.dump();
break;
}
}
void PrettyPrintJsonObjectAsPythonInternal(
std::string* out, const ::nlohmann::json::object_t& obj,
PrettyPrintJsonAsPythonOptions options) {
*out += '{';
for (const auto& [key, value] : obj) {
*out += '\n';
auto new_options = options;
new_options.subsequent_indent += options.indent;
new_options.cur_line_indent = new_options.subsequent_indent;
new_options.width -= 1;
out->append(new_options.subsequent_indent, ' ');
size_t prev_size = out->size();
FormatStringForPython(out, key);
size_t key_repr_len = out->size() - prev_size;
*out += ": ";
new_options.cur_line_indent += key_repr_len + 2;
PrettyPrintJsonAsPython(out, value, new_options);
*out += ',';
}
if (!obj.empty()) {
*out += '\n';
out->append(options.subsequent_indent, ' ');
}
*out += '}';
}
void PrettyPrintJsonArrayAsPythonInternal(
std::string* out, const ::nlohmann::json::array_t& arr,
PrettyPrintJsonAsPythonOptions options) {
*out += '[';
auto new_options = options;
new_options.subsequent_indent += options.indent;
new_options.cur_line_indent = new_options.subsequent_indent;
new_options.width -= 1;
for (const auto& value : arr) {
*out += '\n';
out->append(new_options.subsequent_indent, ' ');
PrettyPrintJsonAsPython(out, value, new_options);
*out += ',';
}
if (!arr.empty()) {
*out += '\n';
out->append(options.subsequent_indent, ' ');
}
*out += ']';
}
}
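// Appends the value to *out, first trying the single-line form and falling
// back to an indented multi-line form for objects and arrays that exceed the
// available width.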
void PrettyPrintJsonAsPython(std::string* out, const ::nlohmann::json& j,
const PrettyPrintJsonAsPythonOptions& options) {
size_t existing_size = out->size();
FormatAsSingleLineForPython(out, j);
std::ptrdiff_t added_size = out->size() - existing_size;
int max_width = options.width - options.cur_line_indent;
if (added_size > max_width) {
if (const auto* obj = j.get_ptr<const ::nlohmann::json::object_t*>()) {
out->resize(existing_size);
PrettyPrintJsonObjectAsPythonInternal(out, *obj, options);
return;
} else if (const auto* arr =
j.get_ptr<const ::nlohmann::json::array_t*>()) {
out->resize(existing_size);
PrettyPrintJsonArrayAsPythonInternal(out, *arr, options);
return;
}
}
}
std::string PrettyPrintJsonAsPython(
const ::nlohmann::json& j, const PrettyPrintJsonAsPythonOptions& options) {
std::string out;
PrettyPrintJsonAsPython(&out, j, options);
return out;
}
std::string PrettyPrintJsonAsPythonRepr(
const Result<::nlohmann::json>& j, std::string_view prefix,
std::string_view suffix, const PrettyPrintJsonAsPythonOptions& options) {
std::string pretty{prefix};
const char* dotdotdot = "...";
if (j.ok()) {
PrettyPrintJsonAsPythonOptions adjusted_options = options;
adjusted_options.width -= suffix.size();
adjusted_options.cur_line_indent += prefix.size();
PrettyPrintJsonAsPython(&pretty, *j, options);
dotdotdot = "";
}
tensorstore::StrAppend(&pretty, dotdotdot, suffix);
return pretty;
}
}
} | #include "tensorstore/internal/json/pprint_python.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::internal_python::PrettyPrintJsonAsPython;
using ::tensorstore::internal_python::PrettyPrintJsonAsPythonRepr;
TEST(PrettyPrintJsonAsPythonTest, Basic) {
EXPECT_EQ("None", PrettyPrintJsonAsPython(::nlohmann::json(nullptr)));
EXPECT_EQ("True", PrettyPrintJsonAsPython(::nlohmann::json(true)));
EXPECT_EQ("False", PrettyPrintJsonAsPython(::nlohmann::json(false)));
EXPECT_EQ("'abc'", PrettyPrintJsonAsPython(::nlohmann::json("abc")));
EXPECT_EQ("b'abc'",
PrettyPrintJsonAsPython(::nlohmann::json(::nlohmann::json::binary_t(
std::vector<uint8_t>{'a', 'b', 'c'}))));
EXPECT_EQ("1", PrettyPrintJsonAsPython(::nlohmann::json(1)));
EXPECT_EQ("1.5", PrettyPrintJsonAsPython(::nlohmann::json(1.5)));
EXPECT_EQ("[1, 2, 3]", PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3})));
EXPECT_EQ("[1, 2, 3]",
PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3}),
{2, 9}));
EXPECT_EQ(R"([
1,
2,
3,
])",
PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3}),
{2, 5}));
EXPECT_EQ("{'a': 1, 'b': 2, 'c': 3}",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}})));
EXPECT_EQ(
"{'a': 1, 'b': 2, 'c': 3}",
PrettyPrintJsonAsPython(::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}}),
{2, 24}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': 3,
})",
PrettyPrintJsonAsPython(::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}}),
{2, 10}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': [
1,
2,
3,
4,
],
})",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}),
{2, 10}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': [1, 2, 3, 4],
})",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}),
{2, 21}));
}
TEST(PrettyPrintJsonAsPythonReprTest, Basic) {
EXPECT_EQ("Foo(None)", PrettyPrintJsonAsPythonRepr(::nlohmann::json(nullptr),
"Foo(", ")"));
EXPECT_EQ("Foo(...)",
PrettyPrintJsonAsPythonRepr(absl::UnknownError(""), "Foo(", ")"));
EXPECT_EQ(
R"(Foo({
'a': 1,
'b': 2,
'c': [1, 2, 3, 4],
}))",
PrettyPrintJsonAsPythonRepr(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}), "Foo(",
")", {2, 21}));
}
} |
690 | cpp | google/tensorstore | same | tensorstore/internal/json/same.cc | tensorstore/internal/json/same_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_SAME_H_
#define TENSORSTORE_INTERNAL_JSON_SAME_H_
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
namespace internal_json {
bool JsonSame(const ::nlohmann::json& a, const ::nlohmann::json& b);
}
}
#endif
#include "tensorstore/internal/json/same.h"
#include <variant>
#include "absl/container/inlined_vector.h"
#include <nlohmann/json.hpp>
namespace tensorstore {
namespace internal_json {
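// Compares two JSON values for equality, treating "discarded" values as equal
// to each other; avoids deep recursion by keeping an explicit stack of
// array/object iterators.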
bool JsonSame(const ::nlohmann::json& a, const ::nlohmann::json& b) {
using value_t = ::nlohmann::json::value_t;
using array_t = ::nlohmann::json::array_t;
using object_t = ::nlohmann::json::object_t;
struct ArrayIterators {
array_t::const_iterator a_cur, a_end, b_cur;
};
struct ObjectIterators {
object_t::const_iterator a_cur, a_end, b_cur;
};
using StackEntry = std::variant<ArrayIterators, ObjectIterators>;
absl::InlinedVector<StackEntry, 64> stack;
const auto compare_or_defer_values = [&](const ::nlohmann::json& a_value,
const ::nlohmann::json& b_value) {
const auto t = a_value.type();
switch (t) {
case value_t::discarded:
case value_t::null:
return b_value.type() == t;
case value_t::array: {
if (b_value.type() != t) return false;
const auto& a_arr = a_value.get_ref<const array_t&>();
const auto& b_arr = b_value.get_ref<const array_t&>();
if (a_arr.size() != b_arr.size()) return false;
if (a_arr.empty()) return true;
stack.emplace_back(
ArrayIterators{a_arr.begin(), a_arr.end(), b_arr.begin()});
return true;
}
case value_t::object: {
if (b_value.type() != t) return false;
const auto& a_obj = a_value.get_ref<const object_t&>();
const auto& b_obj = b_value.get_ref<const object_t&>();
if (a_obj.size() != b_obj.size()) return false;
if (a_obj.empty()) return true;
stack.emplace_back(
ObjectIterators{a_obj.begin(), a_obj.end(), b_obj.begin()});
return true;
}
default:
return a_value == b_value;
}
};
if (!compare_or_defer_values(a, b)) return false;
while (!stack.empty()) {
auto& e = stack.back();
if (auto* array_iterators = std::get_if<ArrayIterators>(&e)) {
auto& a_v = *array_iterators->a_cur;
auto& b_v = *array_iterators->b_cur;
if (++array_iterators->a_cur == array_iterators->a_end) {
stack.pop_back();
} else {
++array_iterators->b_cur;
}
if (!compare_or_defer_values(a_v, b_v)) {
return false;
}
} else {
auto* object_iterators = std::get_if<ObjectIterators>(&e);
auto& a_kv = *object_iterators->a_cur;
auto& b_kv = *object_iterators->b_cur;
if (++object_iterators->a_cur == object_iterators->a_end) {
stack.pop_back();
} else {
++object_iterators->b_cur;
}
if (a_kv.first != b_kv.first ||
!compare_or_defer_values(a_kv.second, b_kv.second)) {
return false;
}
}
}
return true;
}
}
} | #include "tensorstore/internal/json/same.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
TEST(JsonSame, Basic) {
EXPECT_TRUE(tensorstore::internal_json::JsonSame(1.0, 1));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
::nlohmann::json::value_t::discarded, ::nlohmann::json::value_t::null));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::value_t::discarded,
::nlohmann::json::value_t::discarded));
EXPECT_TRUE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3},
{1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::array_t{}, ::nlohmann::json::array_t{}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::object_t{}, ::nlohmann::json::object_t{}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 4}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{1, ::nlohmann::json::value_t::discarded, 3},
{1, ::nlohmann::json::value_t::discarded, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 4}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"c", 3}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}, {"d", 4}}));
const auto make_nested = [](int depth) {
::nlohmann::json value;
::nlohmann::json* tail = &value;
for (int i = 0; i < depth; ++i) {
*tail = ::nlohmann::json::object_t();
auto& obj = tail->get_ref<::nlohmann::json::object_t&>();
tail = &obj["a"];
}
return value;
};
auto nested = make_nested(10000);
EXPECT_TRUE(tensorstore::internal_json::JsonSame(nested, nested));
}
} |
691 | cpp | google/tensorstore | pool_impl | tensorstore/internal/thread/pool_impl.cc | tensorstore/internal/thread/pool_impl_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_POOL_IMPL_H_
#define TENSORSTORE_INTERNAL_THREAD_POOL_IMPL_H_
#include <stddef.h>
#include <cassert>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/internal/container/circular_queue.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/thread/task_provider.h"
namespace tensorstore {
namespace internal_thread_impl {
class SharedThreadPool
: public internal::AtomicReferenceCount<SharedThreadPool> {
public:
SharedThreadPool();
void NotifyWorkAvailable(internal::IntrusivePtr<TaskProvider>)
ABSL_LOCKS_EXCLUDED(mutex_);
private:
struct Overseer;
struct Worker;
internal::IntrusivePtr<TaskProvider> FindActiveTaskProvider()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void StartOverseer() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void StartWorker(internal::IntrusivePtr<TaskProvider>, absl::Time now)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
absl::Mutex mutex_;
size_t worker_threads_ ABSL_GUARDED_BY(mutex_) = 0;
size_t idle_threads_ ABSL_GUARDED_BY(mutex_) = 0;
absl::CondVar overseer_condvar_;
bool overseer_running_ ABSL_GUARDED_BY(mutex_) = false;
absl::Time last_thread_start_time_ ABSL_GUARDED_BY(mutex_) =
absl::InfinitePast();
absl::Time last_thread_exit_time_ ABSL_GUARDED_BY(mutex_) =
absl::InfinitePast();
absl::Time queue_assignment_time_ ABSL_GUARDED_BY(mutex_) =
absl::InfinitePast();
absl::flat_hash_set<TaskProvider*> in_queue_ ABSL_GUARDED_BY(mutex_);
internal_container::CircularQueue<internal::IntrusivePtr<TaskProvider>>
waiting_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "tensorstore/internal/thread/pool_impl.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/thread/task_provider.h"
#include "tensorstore/internal/thread/thread.h"
namespace tensorstore {
namespace internal_thread_impl {
namespace {
constexpr absl::Duration kThreadStartDelay = absl::Milliseconds(5);
constexpr absl::Duration kThreadExitDelay = absl::Milliseconds(5);
constexpr absl::Duration kThreadIdleBeforeExit = absl::Seconds(20);
constexpr absl::Duration kOverseerIdleBeforeExit = absl::Seconds(20);
auto& thread_pool_started = internal_metrics::Counter<int64_t>::New(
"/tensorstore/thread_pool/started", "Threads started by SharedThreadPool");
auto& thread_pool_active = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/thread_pool/active",
"Active threads managed by SharedThreadPool");
auto& thread_pool_task_providers = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/thread_pool/task_providers",
"TaskProviders requesting threads from SharedThreadPool");
ABSL_CONST_INIT internal_log::VerboseFlag thread_pool_logging("thread_pool");
}
SharedThreadPool::SharedThreadPool() : waiting_(128) {
ABSL_LOG_IF(INFO, thread_pool_logging) << "SharedThreadPool: " << this;
}
void SharedThreadPool::NotifyWorkAvailable(
internal::IntrusivePtr<TaskProvider> task_provider) {
absl::MutexLock lock(&mutex_);
if (in_queue_.insert(task_provider.get()).second) {
waiting_.push_back(std::move(task_provider));
}
if (!overseer_running_) {
StartOverseer();
} else {
overseer_condvar_.Signal();
}
}
internal::IntrusivePtr<TaskProvider>
SharedThreadPool::FindActiveTaskProvider() {
for (int i = waiting_.size(); i > 0; i--) {
internal::IntrusivePtr<TaskProvider> ptr = std::move(waiting_.front());
waiting_.pop_front();
auto work = ptr->EstimateThreadsRequired();
if (work == 0) {
in_queue_.erase(ptr.get());
continue;
}
if (work == 1) {
in_queue_.erase(ptr.get());
} else {
waiting_.push_back(ptr);
}
thread_pool_task_providers.Set(waiting_.size());
return ptr;
}
return nullptr;
}
struct SharedThreadPool::Overseer {
internal::IntrusivePtr<SharedThreadPool> pool_;
mutable absl::Time idle_start_time_;
void operator()() const;
void OverseerBody();
absl::Time MaybeStartWorker(absl::Time now)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(pool_->mutex_);
};
void SharedThreadPool::StartOverseer() {
assert(!overseer_running_);
overseer_running_ = true;
tensorstore::internal::Thread::StartDetached(
{"ts_pool_overseer"},
SharedThreadPool::Overseer{
internal::IntrusivePtr<SharedThreadPool>(this)});
}
void SharedThreadPool::Overseer::operator()() const {
const_cast<SharedThreadPool::Overseer*>(this)->OverseerBody();
}
void SharedThreadPool::Overseer::OverseerBody() {
ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Overseer: " << this;
absl::Time now = absl::Now();
idle_start_time_ = now;
absl::Time deadline = absl::InfinitePast();
absl::MutexLock lock(&pool_->mutex_);
while (true) {
pool_->overseer_condvar_.WaitWithDeadline(&pool_->mutex_, deadline);
now = absl::Now();
deadline = MaybeStartWorker(now);
if (deadline < now) break;
}
ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "~Overseer: " << this;
pool_->overseer_running_ = false;
}
absl::Time SharedThreadPool::Overseer::MaybeStartWorker(absl::Time now) {
if (pool_->idle_threads_ || pool_->waiting_.empty()) {
return idle_start_time_ + kOverseerIdleBeforeExit;
}
if (now < pool_->last_thread_start_time_ + kThreadStartDelay) {
return pool_->last_thread_start_time_ + kThreadStartDelay;
}
if (now < pool_->queue_assignment_time_ + kThreadStartDelay) {
return pool_->queue_assignment_time_ + kThreadStartDelay;
}
auto task_provider = pool_->FindActiveTaskProvider();
if (!task_provider) {
return idle_start_time_ + kOverseerIdleBeforeExit;
}
pool_->StartWorker(std::move(task_provider), now);
idle_start_time_ = now;
return now + kThreadStartDelay;
}
struct SharedThreadPool::Worker {
internal::IntrusivePtr<SharedThreadPool> pool_;
internal::IntrusivePtr<TaskProvider> task_provider_;
void operator()() const;
void WorkerBody();
};
void SharedThreadPool::StartWorker(
internal::IntrusivePtr<TaskProvider> task_provider, absl::Time now) {
last_thread_start_time_ = now;
worker_threads_++;
thread_pool_started.Increment();
tensorstore::internal::Thread::StartDetached(
{"ts_pool_worker"}, Worker{internal::IntrusivePtr<SharedThreadPool>(this),
std::move(task_provider)});
}
void SharedThreadPool::Worker::operator()() const {
const_cast<SharedThreadPool::Worker*>(this)->WorkerBody();
}
void SharedThreadPool::Worker::WorkerBody() {
struct ScopedIncDec {
size_t& x_;
ScopedIncDec(size_t& x) : x_(x) { x_++; }
~ScopedIncDec() { x_--; }
};
thread_pool_active.Increment();
ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Worker: " << this;
while (true) {
if (task_provider_) {
task_provider_->DoWorkOnThread();
task_provider_ = nullptr;
}
ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "Idle: " << this;
absl::Time now = absl::Now();
absl::Time deadline = now + kThreadIdleBeforeExit;
{
absl::MutexLock lock(&pool_->mutex_);
ScopedIncDec idle(pool_->idle_threads_);
while (!task_provider_) {
bool active = pool_->mutex_.AwaitWithDeadline(
absl::Condition(
+[](SharedThreadPool* self) ABSL_EXCLUSIVE_LOCKS_REQUIRED(
self->mutex_) { return !self->waiting_.empty(); },
pool_.get()),
deadline);
now = absl::Now();
if (active) {
task_provider_ = pool_->FindActiveTaskProvider();
} else {
deadline = std::max(deadline,
pool_->last_thread_exit_time_ + kThreadExitDelay);
if (deadline < now) {
break;
}
}
}
if (task_provider_) {
pool_->queue_assignment_time_ = now;
} else {
pool_->worker_threads_--;
pool_->last_thread_exit_time_ = now;
break;
}
}
}
thread_pool_active.Decrement();
ABSL_LOG_IF(INFO, thread_pool_logging.Level(1)) << "~Worker: " << this;
}
}
} | #include "tensorstore/internal/thread/pool_impl.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/thread/task.h"
#include "tensorstore/internal/thread/task_provider.h"
namespace {
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal_thread_impl::InFlightTask;
using ::tensorstore::internal_thread_impl::SharedThreadPool;
using ::tensorstore::internal_thread_impl::TaskProvider;
struct SingleTaskProvider : public TaskProvider {
struct private_t {};
public:
static IntrusivePtr<SingleTaskProvider> Make(
IntrusivePtr<SharedThreadPool> pool, std::unique_ptr<InFlightTask> task) {
return MakeIntrusivePtr<SingleTaskProvider>(private_t{}, std::move(pool),
std::move(task));
}
SingleTaskProvider(private_t, IntrusivePtr<SharedThreadPool> pool,
std::unique_ptr<InFlightTask> task)
: pool_(std::move(pool)), task_(std::move(task)) {}
~SingleTaskProvider() override = default;
int64_t EstimateThreadsRequired() override {
absl::MutexLock lock(&mutex_);
flags_ += 2;
return task_ ? 1 : 0;
}
void Trigger() {
pool_->NotifyWorkAvailable(IntrusivePtr<TaskProvider>(this));
}
void DoWorkOnThread() override {
std::unique_ptr<InFlightTask> task;
{
absl::MutexLock lock(&mutex_);
flags_ |= 1;
if (task_) {
task = std::move(task_);
}
}
if (task) {
task->Run();
}
}
IntrusivePtr<SharedThreadPool> pool_;
absl::Mutex mutex_;
std::unique_ptr<InFlightTask> task_ ABSL_GUARDED_BY(mutex_);
int64_t flags_ = 0;
};
TEST(SharedThreadPoolTest, Basic) {
auto pool = MakeIntrusivePtr<SharedThreadPool>();
{
absl::Notification notification;
auto provider = SingleTaskProvider::Make(
pool, std::make_unique<InFlightTask>([&] { notification.Notify(); }));
provider->Trigger();
provider->Trigger();
notification.WaitForNotification();
}
}
TEST(SharedThreadPoolTest, LotsOfProviders) {
auto pool = MakeIntrusivePtr<SharedThreadPool>();
std::vector<IntrusivePtr<SingleTaskProvider>> providers;
providers.reserve(1000);
for (int i = 2; i < 1000; i = i * 2) {
absl::BlockingCounter a(i);
for (int j = 0; j < i; j++) {
providers.push_back(SingleTaskProvider::Make(
pool, std::make_unique<InFlightTask>([&] { a.DecrementCount(); })));
}
for (auto& p : providers) p->Trigger();
a.Wait();
for (auto& p : providers) p->Trigger();
providers.clear();
}
}
} |
692 | cpp | google/tensorstore | thread | tensorstore/internal/thread/thread.cc | tensorstore/internal/thread/thread_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_H_
#define TENSORSTORE_INTERNAL_THREAD_H_
#include <climits>
#include <cstring>
#include <functional>
#include <thread>
#include <utility>
#include "absl/log/absl_check.h"
namespace tensorstore {
namespace internal {
void TrySetCurrentThreadName(const char* name);
class Thread {
public:
using Id = std::thread::id;
struct Options {
const char* name = nullptr;
};
Thread() = default;
template <class Function, class... Args>
explicit Thread(Options options, Function&& f, Args&&... args)
: Thread(private_t{}, options, std::forward<Function>(f),
std::forward<Args>(args)...) {}
Thread(Thread&& other) noexcept = default;
Thread& operator=(Thread&& other) = default;
Thread(const Thread& other) = delete;
Thread& operator=(const Thread& other) = delete;
~Thread() { ABSL_CHECK(!thread_.joinable()); }
template <class Function, class... Args>
static void StartDetached(Options options, Function&& f, Args&&... args) {
Thread(private_t{}, options, std::forward<Function>(f),
std::forward<Args>(args)...)
.thread_.detach();
}
void Join() {
ABSL_CHECK_NE(this_thread_id(), get_id());
thread_.join();
}
Id get_id() const { return thread_.get_id(); }
static Id this_thread_id() { return std::this_thread::get_id(); }
private:
struct private_t {};
template <class Function, class... Args>
Thread(private_t, Options options, Function&& f, Args&&... args)
: thread_(
[name = options.name, fn = std::bind(std::forward<Function>(f),
std::forward<Args>(args)...)] {
TrySetCurrentThreadName(name);
std::move(fn)();
}) {}
std::thread thread_;
};
}
}
#endif
#if defined(__linux__) || defined(__APPLE__)
#include <pthread.h>
#endif
#include <thread>
#include <type_traits>
namespace tensorstore {
namespace internal {
void TrySetCurrentThreadName(const char* name) {
if (name == nullptr) return;
#if defined(__linux__)
pthread_setname_np(pthread_self(), name);
#endif
#if defined(__APPLE__)
pthread_setname_np(name);
#endif
}
}
} | #include "tensorstore/internal/thread/thread.h"
#include <gtest/gtest.h>
namespace {
TEST(ThreadTest, Basic) {
tensorstore::internal::Thread my_thread;
int x = 0;
tensorstore::internal::Thread::Id id[2];
my_thread = tensorstore::internal::Thread({}, [&x, &id]() {
x = 1;
id[1] = tensorstore::internal::Thread::this_thread_id();
});
id[0] = my_thread.get_id();
my_thread.Join();
EXPECT_EQ(id[0], id[1]);
EXPECT_EQ(1, x);
}
} |
693 | cpp | google/tensorstore | schedule_at | tensorstore/internal/thread/schedule_at.cc | tensorstore/internal/thread/schedule_at_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_SCHEDULE_AT_H_
#define TENSORSTORE_INTERNAL_THREAD_SCHEDULE_AT_H_
#include "absl/functional/any_invocable.h"
#include "absl/time/time.h"
#include "tensorstore/util/stop_token.h"
namespace tensorstore {
namespace internal {
void ScheduleAt(absl::Time target_time, absl::AnyInvocable<void() &&> task,
const StopToken& stop_token = {});
}
}
#endif
#include "tensorstore/internal/thread/schedule_at.h"
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <iterator>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/value.h"
#include "tensorstore/internal/tagged_ptr.h"
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/internal/tracing/tracing.h"
#include "tensorstore/util/stop_token.h"
namespace tensorstore {
namespace internal {
namespace {
using ScheduleAtTask = absl::AnyInvocable<void() &&>;
auto& schedule_at_queued_ops = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/internal/thread/schedule_at/queued_ops",
"Operations in flight on the schedule_at thread");
auto& schedule_at_next_event = internal_metrics::Value<absl::Time>::New(
"/tensorstore/internal/thread/schedule_at/next_event",
"Time of the next in-flight schedule_at operation");
auto& schedule_at_insert_histogram_ms =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/internal/thread/schedule_at/insert_histogram_ms",
"Histogram of schedule_at insert delays (ms)");
class DeadlineTaskQueue;
using TaggedQueuePointer = TaggedPtr<DeadlineTaskQueue, 1>;
struct DeadlineTaskNode;
using DeadlineTaskTree = intrusive_red_black_tree::Tree<DeadlineTaskNode>;
struct DeadlineTaskStopCallback {
DeadlineTaskNode& node;
void operator()() const;
};
struct DeadlineTaskNode : public DeadlineTaskTree::NodeBase {
DeadlineTaskNode(absl::Time deadline, ScheduleAtTask&& task,
const StopToken& token)
: deadline(deadline),
task(std::move(task)),
trace_context(internal_tracing::TraceContext::kThread),
queue(TaggedQueuePointer{}),
stop_callback(token, DeadlineTaskStopCallback{*this}) {}
void RunAndDelete();
absl::Time deadline;
ScheduleAtTask task;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS internal_tracing::TraceContext trace_context;
std::atomic<TaggedQueuePointer> queue;
StopCallback<DeadlineTaskStopCallback> stop_callback;
};
using RunImmediatelyQueueAccessor =
intrusive_red_black_tree::LinkedListAccessor<DeadlineTaskNode>;
class DeadlineTaskQueue {
public:
explicit DeadlineTaskQueue()
: run_immediately_queue_(nullptr),
next_wakeup_(absl::InfinitePast()),
woken_up_(absl::InfinitePast()),
thread_({"TensorstoreScheduleAt"}, &DeadlineTaskQueue::Run, this) {}
~DeadlineTaskQueue() { ABSL_UNREACHABLE(); }
void ScheduleAt(absl::Time target_time, ScheduleAtTask task,
const StopToken& stop_token);
void Run();
private:
friend struct DeadlineTaskNode;
friend struct DeadlineTaskStopCallback;
void TryRemove(DeadlineTaskNode& node);
absl::Mutex mutex_;
absl::CondVar cond_var_;
DeadlineTaskTree tree_ ABSL_GUARDED_BY(mutex_);
DeadlineTaskNode* run_immediately_queue_ ABSL_GUARDED_BY(mutex_);
absl::Time next_wakeup_ ABSL_GUARDED_BY(mutex_);
absl::Time woken_up_ ABSL_GUARDED_BY(mutex_);
Thread thread_;
};
void DeadlineTaskQueue::ScheduleAt(absl::Time target_time, ScheduleAtTask task,
const StopToken& stop_token) {
schedule_at_queued_ops.Increment();
schedule_at_insert_histogram_ms.Observe(
absl::ToInt64Milliseconds(target_time - absl::Now()));
auto node = std::make_unique<DeadlineTaskNode>(target_time, std::move(task),
stop_token);
absl::MutexLock l(&mutex_);
auto tagged_queue_ptr = node->queue.exchange(TaggedQueuePointer(this));
if (tagged_queue_ptr.tag()) {
return;
}
if (target_time <= woken_up_) {
RunImmediatelyQueueAccessor{}.SetNext(node.get(), nullptr);
if (run_immediately_queue_) {
RunImmediatelyQueueAccessor{}.SetNext(
RunImmediatelyQueueAccessor{}.GetPrev(run_immediately_queue_),
node.get());
RunImmediatelyQueueAccessor{}.SetPrev(run_immediately_queue_, node.get());
} else {
run_immediately_queue_ = node.get();
RunImmediatelyQueueAccessor{}.SetPrev(node.get(), node.get());
}
if (next_wakeup_ != absl::InfinitePast()) {
next_wakeup_ = absl::InfinitePast();
cond_var_.Signal();
}
node.release();
return;
}
tree_.FindOrInsert(
[&](DeadlineTaskNode& other) {
return target_time < other.deadline ? absl::weak_ordering::less
: absl::weak_ordering::greater;
},
[&] { return node.release(); });
if (target_time < next_wakeup_) {
next_wakeup_ = target_time;
cond_var_.Signal();
}
}
void DeadlineTaskQueue::Run() {
while (true) {
DeadlineTaskTree runnable;
DeadlineTaskNode* run_immediately = nullptr;
{
absl::MutexLock l(&mutex_);
do {
run_immediately = std::exchange(run_immediately_queue_, nullptr);
if (!run_immediately) {
next_wakeup_ =
tree_.empty() ? absl::InfiniteFuture() : tree_.begin()->deadline;
schedule_at_next_event.Set(next_wakeup_);
cond_var_.WaitWithDeadline(&mutex_, next_wakeup_);
}
auto woken_up = woken_up_ = std::max(woken_up_, absl::Now());
auto split_result = tree_.FindSplit([&](DeadlineTaskNode& node) {
return node.deadline <= woken_up ? absl::weak_ordering::greater
: absl::weak_ordering::less;
});
runnable = std::move(split_result.trees[0]);
tree_ = std::move(split_result.trees[1]);
} while (runnable.empty() && !run_immediately);
next_wakeup_ = absl::InfinitePast();
}
internal_tracing::TraceContext base =
internal_tracing::TraceContext(internal_tracing::TraceContext::kThread);
while (run_immediately) {
auto* next = RunImmediatelyQueueAccessor{}.GetNext(run_immediately);
run_immediately->RunAndDelete();
run_immediately = next;
}
for (DeadlineTaskTree::iterator it = runnable.begin(), next;
it != runnable.end(); it = next) {
next = std::next(it);
runnable.Remove(*it);
it->RunAndDelete();
}
internal_tracing::SwapCurrentTraceContext(&base);
}
}
void DeadlineTaskNode::RunAndDelete() {
schedule_at_queued_ops.Decrement();
if (queue.load(std::memory_order_relaxed).tag()) {
} else {
internal_tracing::SwapCurrentTraceContext(&trace_context);
std::move(task)();
}
delete this;
}
void DeadlineTaskStopCallback::operator()() const {
auto tagged_queue_ptr = node.queue.exchange(TaggedQueuePointer{nullptr, 1});
auto* queue_ptr = tagged_queue_ptr.get();
if (!queue_ptr) {
return;
}
queue_ptr->TryRemove(node);
}
void DeadlineTaskQueue::TryRemove(DeadlineTaskNode& node) {
{
absl::MutexLock lock(&mutex_);
if (node.deadline <= woken_up_) {
return;
}
tree_.Remove(node);
}
delete &node;
schedule_at_queued_ops.Decrement();
}
}
void ScheduleAt(absl::Time target_time, ScheduleAtTask task,
const StopToken& stop_token) {
static absl::NoDestructor<DeadlineTaskQueue> g_queue;
g_queue->ScheduleAt(std::move(target_time), std::move(task), stop_token);
}
}
} | #include "tensorstore/internal/thread/schedule_at.h"
#include <memory>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/util/stop_token.h"
namespace {
using ::tensorstore::StopSource;
using ::tensorstore::internal::ScheduleAt;
TEST(ScheduleAtTest, Basic) {
absl::Notification a, b;
auto now = absl::Now();
ScheduleAt(now + absl::Milliseconds(1), [&] { a.Notify(); });
ScheduleAt(now + absl::Milliseconds(5), [&] { b.Notify(); });
EXPECT_FALSE(b.HasBeenNotified());
b.WaitForNotification();
EXPECT_TRUE(a.HasBeenNotified());
}
TEST(ScheduleAtTest, RunImmediately) {
auto notification = std::make_shared<absl::Notification>();
ScheduleAt(absl::InfinitePast(), [=] { notification->Notify(); });
notification->WaitForNotification();
}
TEST(ScheduleAtTest, RunMultipleImmediately) {
auto notification = std::make_shared<absl::Notification>();
ScheduleAt(absl::Now(), [=] { notification->WaitForNotification(); });
auto notification1 = std::make_shared<absl::Notification>();
auto notification2 = std::make_shared<absl::Notification>();
ScheduleAt(absl::InfinitePast(), [=] {
EXPECT_FALSE(notification2->HasBeenNotified());
notification1->Notify();
});
ScheduleAt(absl::InfinitePast(), [=] { notification2->Notify(); });
notification->Notify();
notification1->WaitForNotification();
notification2->WaitForNotification();
}
TEST(ScheduleAtTest, Cancel) {
auto notification = std::make_shared<absl::Notification>();
EXPECT_EQ(1, notification.use_count());
StopSource stop_source;
ScheduleAt(
absl::InfiniteFuture(), [notification] { notification->Notify(); },
stop_source.get_token());
EXPECT_EQ(2, notification.use_count());
stop_source.request_stop();
EXPECT_EQ(1, notification.use_count());
EXPECT_FALSE(notification->HasBeenNotified());
}
TEST(ScheduleAtTest, CancelImmediately) {
auto notification = std::make_shared<absl::Notification>();
EXPECT_EQ(1, notification.use_count());
StopSource stop_source;
stop_source.request_stop();
ScheduleAt(
absl::InfinitePast(), [notification] { notification->Notify(); },
stop_source.get_token());
EXPECT_EQ(1, notification.use_count());
EXPECT_FALSE(notification->HasBeenNotified());
}
TEST(ScheduleAtTest, CancelWhileRunning) {
auto notification1 = std::make_shared<absl::Notification>();
StopSource stop_source;
ScheduleAt(absl::InfinitePast(), [=] {
notification1->WaitForNotification();
stop_source.request_stop();
});
auto notification2 = std::make_shared<absl::Notification>();
auto notification3 = std::make_shared<absl::Notification>();
ScheduleAt(
absl::InfinitePast(), [=] { notification2->Notify(); },
stop_source.get_token());
ScheduleAt(absl::InfinitePast(), [=] { notification3->Notify(); });
notification1->Notify();
notification3->WaitForNotification();
EXPECT_FALSE(notification2->HasBeenNotified());
EXPECT_EQ(1, notification2.use_count());
}
} |
694 | cpp | google/tensorstore | thread_pool | tensorstore/internal/thread/thread_pool.cc | tensorstore/internal/thread/thread_pool_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_THREAD_POOL_H_
#define TENSORSTORE_INTERNAL_THREAD_THREAD_POOL_H_
#include <stddef.h>
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace internal {
Executor DetachedThreadPool(size_t num_threads);
}
}
#endif
#include "tensorstore/internal/thread/thread_pool.h"
#include <stddef.h>
#include <cassert>
#include <limits>
#include <memory>
#include <thread>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/thread/pool_impl.h"
#include "tensorstore/internal/thread/task.h"
#include "tensorstore/internal/thread/task_group_impl.h"
#include "tensorstore/util/executor.h"
namespace tensorstore {
namespace internal {
namespace {
Executor DefaultThreadPool(size_t num_threads) {
static absl::NoDestructor<internal_thread_impl::SharedThreadPool> pool_;
intrusive_ptr_increment(pool_.get());
if (num_threads == 0 || num_threads == std::numeric_limits<size_t>::max()) {
num_threads = std::thread::hardware_concurrency() * 16;
if (num_threads == 0) num_threads = 1024;
ABSL_LOG_FIRST_N(INFO, 1)
<< "DetachedThreadPool should specify num_threads; using "
<< num_threads;
}
auto task_group = internal_thread_impl::TaskGroup::Make(
internal::IntrusivePtr<internal_thread_impl::SharedThreadPool>(
pool_.get()),
num_threads);
return [task_group = std::move(task_group)](ExecutorTask task) {
task_group->AddTask(
std::make_unique<internal_thread_impl::InFlightTask>(std::move(task)));
};
}
}
Executor DetachedThreadPool(size_t num_threads) {
return DefaultThreadPool(num_threads);
}
}
} | #include "tensorstore/internal/thread/thread_pool.h"
#include <string>
#include "absl/flags/commandlineflag.h"
#include "absl/flags/reflection.h"
void SetupThreadPoolTestEnv() {
}
#include "tensorstore/internal/thread/thread_pool_test.inc" |
695 | cpp | google/tensorstore | std_variant | tensorstore/internal/json_binding/std_variant.cc | tensorstore/internal/json_binding/std_variant_test.cc | #ifndef TENSORSTORE_SERIALIZATION_STD_VARIANT_H_
#define TENSORSTORE_SERIALIZATION_STD_VARIANT_H_
#include <variant>
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
template <typename... T>
struct Serializer<std::variant<T...>> {
[[nodiscard]] static bool Encode(EncodeSink& sink,
const std::variant<T...>& value) {
return serialization::WriteSize(sink.writer(), value.index()) &&
std::visit(
[&sink](auto& x) { return serialization::Encode(sink, x); },
value);
}
[[nodiscard]] static bool Decode(DecodeSource& source,
std::variant<T...>& value) {
size_t index;
if (!serialization::ReadSize(source.reader(), index)) return false;
if (index >= sizeof...(T)) {
source.Fail(absl::DataLossError("Invalid variant index"));
return false;
}
return DecodeImpl(source, value, index, std::index_sequence_for<T...>{});
}
template <size_t... Is>
[[nodiscard]] static bool DecodeImpl(DecodeSource& source,
std::variant<T...>& value, size_t index,
std::index_sequence<Is...>) {
return ((index == Is
? serialization::Decode(source, value.template emplace<Is>())
: true) &&
...);
}
constexpr static bool non_serializable() {
return (IsNonSerializableLike<T> || ...);
}
};
}
}
#endif
#include <stddef.h>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_json_binding {
absl::Status GetVariantErrorStatus(span<const absl::Status> status_values) {
std::string error = "No matching value binder: ";
for (size_t i = 0; i < status_values.size(); ++i) {
if (i != 0) error += "; ";
error += status_values[i].message();
}
return absl::InvalidArgumentError(error);
}
}
} | #include "tensorstore/internal/json_binding/std_variant.h"
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = ::tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(JsonBindingTest, VariantDefaultBinder) {
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>({
{3, ::nlohmann::json(3)},
{"abc", ::nlohmann::json("abc")},
});
}
TEST(JsonBindingTest, VariantDefaultBinderError) {
EXPECT_THAT(
(jb::FromJson<std::variant<int, std::string>>(::nlohmann::json(false))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"No matching value binder: "
"Expected integer in the range .*, but received: false; "
"Expected string, but received: false"));
}
TEST(JsonBindingTest, VariantExplicitBinder) {
auto binder = jb::Object(jb::Variant(jb::Member("a"), jb::Member("b")));
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>(
{
{3, {{"a", 3}}},
{"abc", {{"b", "abc"}}},
},
binder);
}
} |
696 | cpp | google/tensorstore | raw_bytes_hex | tensorstore/internal/json_binding/raw_bytes_hex.cc | tensorstore/internal/json_binding/raw_bytes_hex_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_RAW_BYTES_HEX_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_RAW_BYTES_HEX_H_
#include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/json_serialization_options_base.h"
namespace tensorstore {
namespace internal_json_binding {
namespace raw_bytes_hex_binder {
struct RawBytesHexImpl {
size_t num_bytes;
absl::Status operator()(std::true_type is_loading, NoOptions, void* obj,
::nlohmann::json* j) const;
absl::Status operator()(std::false_type is_loading, NoOptions,
const void* obj, ::nlohmann::json* j) const;
};
constexpr auto RawBytesHex = [](auto is_loading, NoOptions options, auto* obj,
auto* j) -> absl::Status {
using T = internal::remove_cvref_t<decltype(*obj)>;
static_assert(std::is_trivially_destructible_v<T>);
return RawBytesHexImpl{sizeof(T)}(is_loading, options, obj, j);
};
}
using raw_bytes_hex_binder::RawBytesHex;
}
}
#endif
#include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <cassert>
#include <cstring>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
namespace tensorstore {
namespace internal_json_binding {
namespace {
bool IsHexString(std::string_view s) {
for (char c : s) {
if (!(c >= '0' && c <= '9') && !(c >= 'a' && c <= 'f') &&
!(c >= 'A' && c <= 'F')) {
return false;
}
}
return true;
}
}
namespace raw_bytes_hex_binder {
absl::Status RawBytesHexImpl::operator()(std::true_type is_loading, NoOptions,
void* obj, ::nlohmann::json* j) const {
auto* s = j->get_ptr<const std::string*>();
if (!s || s->size() != 2 * num_bytes ||
!internal_json_binding::IsHexString(*s)) {
return absl::InvalidArgumentError(
absl::StrFormat("Expected string with %d hex digits, but received: %s",
num_bytes * 2, j->dump()));
}
std::string temp = absl::HexStringToBytes(*s);
assert(temp.size() == num_bytes);
std::memcpy(obj, temp.data(), num_bytes);
return absl::OkStatus();
}
absl::Status RawBytesHexImpl::operator()(std::false_type is_loading, NoOptions,
const void* obj,
::nlohmann::json* j) const {
*j = absl::BytesToHexString(
absl::string_view(reinterpret_cast<const char*>(obj), num_bytes));
return absl::OkStatus();
}
}
}
} | #include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <string>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(RawBytesHexTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTrip<std::array<unsigned char, 3>>(
{
{{{1, 2, 0xab}}, "0102ab"},
},
jb::RawBytesHex);
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<
std::array<unsigned char, 3>>(
{
{"0102AB", "0102ab"},
},
jb::RawBytesHex);
}
TEST(RawBytesHexTest, Invalid) {
tensorstore::TestJsonBinderFromJson<std::array<unsigned char, 3>>(
{
{1,
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string with 6 hex digits, but received: 1")},
{"0102zb", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string with 6 hex "
"digits, but received: \"0102zb\"")},
},
jb::RawBytesHex);
}
} |
697 | cpp | google/tensorstore | staleness_bound | tensorstore/internal/json_binding/staleness_bound.cc | tensorstore/internal/json_binding/staleness_bound_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_STALENESS_BOUND_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_STALENESS_BOUND_H_
#include "absl/status/status.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/staleness_bound.h"
namespace tensorstore {
namespace internal {
TENSORSTORE_DECLARE_JSON_BINDER(StalenessBoundJsonBinder, StalenessBound);
}
namespace internal_json_binding {
template <>
inline constexpr auto DefaultBinder<StalenessBound> =
internal::StalenessBoundJsonBinder;
}
}
#endif
#include "tensorstore/internal/json_binding/staleness_bound.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal {
TENSORSTORE_DEFINE_JSON_BINDER(
StalenessBoundJsonBinder,
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
if (const auto* b = j->get_ptr<const bool*>()) {
*obj = *b ? absl::InfiniteFuture() : absl::InfinitePast();
} else if (j->is_number()) {
const double t = static_cast<double>(*j);
*obj = absl::UnixEpoch() + absl::Seconds(t);
} else if (*j == "open") {
obj->time = absl::InfiniteFuture();
obj->bounded_by_open_time = true;
} else {
return internal_json::ExpectedError(*j,
"boolean, number, or \"open\"");
}
} else {
if (obj->bounded_by_open_time) {
*j = "open";
} else {
const absl::Time& t = obj->time;
if (t == absl::InfiniteFuture()) {
*j = true;
} else if (t == absl::InfinitePast()) {
*j = false;
} else {
*j = absl::ToDoubleSeconds(t - absl::UnixEpoch());
}
}
}
return absl::OkStatus();
})
}
} | #include "tensorstore/internal/json_binding/staleness_bound.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/staleness_bound.h"
using ::tensorstore::MatchesJson;
using ::tensorstore::StalenessBound;
using ::testing::Optional;
namespace {
TEST(StalenessBoundJsonBinderTest, RoundTrip) {
tensorstore::TestJsonBinderToJson<StalenessBound>({
{StalenessBound{absl::InfinitePast()}, Optional(MatchesJson(false))},
{StalenessBound{absl::InfiniteFuture()}, Optional(MatchesJson(true))},
{StalenessBound::BoundedByOpen(), Optional(MatchesJson("open"))},
{StalenessBound{absl::UnixEpoch()}, Optional(MatchesJson(0))},
{StalenessBound{absl::UnixEpoch() + absl::Seconds(1)},
Optional(MatchesJson(1))},
});
}
TEST(StalenessBoundJsonBinderTest, FromJson) {
tensorstore::TestJsonBinderFromJson<StalenessBound>({
{false,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::InfinitePast()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{true,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::InfiniteFuture()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{"open", ::testing::Optional(::testing::Field(
&StalenessBound::bounded_by_open_time, true))},
{0, ::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time, absl::UnixEpoch()),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1, ::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Seconds(1)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1u,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Seconds(1)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
{1.5,
::testing::Optional(::testing::AllOf(
::testing::Field(&StalenessBound::time,
absl::UnixEpoch() + absl::Milliseconds(1500)),
::testing::Field(&StalenessBound::bounded_by_open_time, false)))},
});
}
} |
698 | cpp | google/tensorstore | proto_util | tensorstore/proto/proto_util.cc | tensorstore/proto/proto_util_test.cc | #ifndef TENSORSTORE_PROTO_PROTO_UTIL_H_
#define TENSORSTORE_PROTO_PROTO_UTIL_H_
#include <string>
#include <string_view>
#include <vector>
#include "google/protobuf/message.h"
namespace tensorstore {
bool TryParseTextProto(std::string_view asciipb, google::protobuf::Message* msg,
std::vector<std::string>* errors = nullptr,
bool allow_partial_messages = true,
bool allow_unknown_extensions = false);
std::string ConciseDebugString(const google::protobuf::Message& message);
}
#endif
#include "tensorstore/proto/proto_util.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "google/protobuf/io/tokenizer.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
namespace tensorstore {
namespace {
class ErrorCollector : public google::protobuf::io::ErrorCollector {
public:
ErrorCollector() = default;
~ErrorCollector() override = default;
void RecordError(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
void RecordWarning(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
std::vector<std::string> errors;
};
class ConcisePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter {
public:
void PrintString(
const std::string& val,
google::protobuf::TextFormat::BaseTextGenerator* generator) const override {
if (val.size() <= 80) {
FastFieldValuePrinter::PrintString(val, generator);
return;
}
std::string output = absl::StrFormat("<%d bytes: ", val.size());
for (size_t i = 0; i < 8; i++) {
absl::StrAppendFormat(&output, "\\x%02x", val[i]);
}
absl::StrAppend(&output, "...>");
generator->PrintString(output);
}
};
}
bool TryParseTextProto(absl::string_view asciipb, google::protobuf::Message* msg,
std::vector<std::string>* errors,
bool allow_partial_messages,
bool allow_unknown_extensions) {
google::protobuf::TextFormat::Parser parser;
parser.AllowPartialMessage(allow_partial_messages);
parser.AllowUnknownExtension(allow_unknown_extensions);
ErrorCollector error_collector;
parser.RecordErrorsTo(&error_collector);
google::protobuf::io::ArrayInputStream asciipb_istream(asciipb.data(), asciipb.size());
if (parser.Parse(&asciipb_istream, msg)) {
return true;
}
msg->Clear();
if (errors) {
*errors = std::move(error_collector.errors);
}
return false;
}
std::string ConciseDebugString(const google::protobuf::Message& message) {
google::protobuf::TextFormat::Printer printer;
printer.SetDefaultFieldValuePrinter(new ConcisePrinter());
printer.SetSingleLineMode(true);
printer.SetExpandAny(true);
std::string debugstring;
printer.PrintToString(message, &debugstring);
if (!debugstring.empty() && debugstring.back() == ' ') {
debugstring.pop_back();
}
return debugstring;
}
} | #include "tensorstore/proto/proto_util.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::ConciseDebugString;
using ::tensorstore::TryParseTextProto;
TEST(ProtoUtilTest, Basic) {
constexpr const char kProto[] = R"pb(
dtype: "int64"
shape: [ 1, 2, 4 ]
int_data: [ 1, 0, 2, 2, 4, 5, 6, 7 ]
)pb";
::tensorstore::proto::Array proto;
EXPECT_TRUE(TryParseTextProto(kProto, &proto));
EXPECT_THAT(proto, EqualsProto(kProto));
std::vector<std::string> errors;
EXPECT_FALSE(TryParseTextProto("a: 'foo'", &proto, &errors));
EXPECT_FALSE(errors.empty());
}
TEST(ProtoUtilTest, ConciseDebugString) {
::tensorstore::proto::Array proto;
proto.set_dtype("int64");
proto.set_void_data(
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}");
EXPECT_EQ(
"dtype: \"int64\" "
"void_data: <256 bytes: \\x7b\\x30\\x31\\x32\\x33\\x34\\x35\\x36...>",
ConciseDebugString(proto));
}
} |
699 | cpp | google/tensorstore | encode_time | tensorstore/proto/encode_time.cc | tensorstore/proto/encode_time_test.cc | #ifndef TENSORSTORE_PROTO_ENCODE_TIME_H_
#define TENSORSTORE_PROTO_ENCODE_TIME_H_
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "absl/time/time.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
void AbslTimeToProto(absl::Time t, google::protobuf::Timestamp* proto);
Result<absl::Time> ProtoToAbslTime(const google::protobuf::Timestamp& proto);
void AbslDurationToProto(absl::Duration d, google::protobuf::Duration* proto);
Result<absl::Duration> ProtoToAbslDuration(
const google::protobuf::Duration& proto);
}
}
#endif
#include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
void AbslTimeToProto(absl::Time t, google::protobuf::Timestamp* proto) {
if (t == absl::InfiniteFuture()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (t == absl::InfinitePast()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::ToUnixSeconds(t);
const int64_t n = (t - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
tensorstore::Result<absl::Time> ProtoToAbslTime(
const google::protobuf::Timestamp& proto) {
const auto sec = proto.seconds();
const auto ns = proto.nanos();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteFuture();
}
if (sec == 0x8000000000000000ll) {
return absl::InfinitePast();
}
if (sec < -62135596800 || sec > 253402300799) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < 0 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
return absl::FromUnixSeconds(sec) + absl::Nanoseconds(ns);
}
void AbslDurationToProto(absl::Duration d, google::protobuf::Duration* proto) {
if (d == absl::InfiniteDuration()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (d == -absl::InfiniteDuration()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::IDivDuration(d, absl::Seconds(1), &d);
const int64_t n = absl::IDivDuration(d, absl::Nanoseconds(1), &d);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
Result<absl::Duration> ProtoToAbslDuration(
const google::protobuf::Duration& proto) {
const auto sec = proto.seconds();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteDuration();
}
if (sec == 0x8000000000000000ll) {
return -absl::InfiniteDuration();
}
const auto ns = proto.nanos();
if (sec < -315576000000 || sec > 315576000000) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < -999999999 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
if ((sec < 0 && ns > 0) || (sec > 0 && ns < 0)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Sign mismatch between seconds=", sec, ", nanos=", ns));
}
return absl::Seconds(sec) + absl::Nanoseconds(ns);
}
}
} | #include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include <gtest/gtest.h>
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal::AbslDurationToProto;
using ::tensorstore::internal::AbslTimeToProto;
using ::tensorstore::internal::ProtoToAbslDuration;
using ::tensorstore::internal::ProtoToAbslTime;
TEST(EncodeTimestamp, Basic) {
auto roundtrip = [](absl::Time ts) {
google::protobuf::Timestamp proto;
AbslTimeToProto(ts, &proto);
return ProtoToAbslTime(proto);
};
tensorstore::Result<absl::Time> result;
result = roundtrip(absl::InfinitePast());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfinitePast(), *result);
result = roundtrip(absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfiniteFuture(), *result);
auto now = absl::Now();
result = roundtrip(now);
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(now, *result);
}
TEST(EncodeDuration, Basic) {
auto roundtrip = [](absl::Duration d) {
google::protobuf::Duration proto;
AbslDurationToProto(d, &proto);
return ProtoToAbslDuration(proto);
};
auto test_roundtrip = [&](absl::Duration d) {
SCOPED_TRACE(tensorstore::StrCat("duration=", d));
EXPECT_THAT(roundtrip(d), ::testing::Optional(d));
};
test_roundtrip(absl::InfiniteDuration());
test_roundtrip(-absl::InfiniteDuration());
test_roundtrip(absl::Seconds(5));
test_roundtrip(absl::Seconds(-5));
test_roundtrip(absl::ZeroDuration());
test_roundtrip(absl::Milliseconds(12345));
test_roundtrip(absl::Milliseconds(-12345));
}
} |