ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
700 | cpp | google/tensorstore | proto_binder | tensorstore/proto/proto_binder.cc | tensorstore/proto/proto_binder_test.cc | #ifndef TENSORSTORE_PROTO_PROTO_BINDER_H_
#define TENSORSTORE_PROTO_PROTO_BINDER_H_
#include <type_traits>
#include "absl/status/status.h"
#include "google/protobuf/message.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
namespace tensorstore {
namespace internal_json_binding {
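// Binds ::nlohmann::json to google::protobuf::Message via the proto JSON mapping:
// the std::true_type overload parses a JSON object into the message (loading),
// and the std::false_type overload serializes the message back to JSON (saving).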
struct JsonProtoBinderBase {
absl::Status operator()(std::true_type /*is_loading*/,
const NoOptions& options, google::protobuf::Message* obj,
::nlohmann::json* j) const;
absl::Status operator()(std::false_type /*is_loading*/,
const NoOptions& options, const google::protobuf::Message* obj,
::nlohmann::json* j) const;
};
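// Same loading/saving interface, but the message is exchanged as a
// text-format (ASCII) proto held in a JSON string value.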
struct AsciiProtoBinderBase {
absl::Status operator()(std::true_type /*is_loading*/,
const NoOptions& options, google::protobuf::Message* obj,
::nlohmann::json* j) const;
absl::Status operator()(std::false_type /*is_loading*/,
const NoOptions& options, const google::protobuf::Message* obj,
::nlohmann::json* j) const;
};
template <typename MessageType>
struct JsonProtoBinder : private JsonProtoBinderBase {
inline absl::Status operator()(std::true_type /*is_loading*/,
const NoOptions& options, MessageType* obj,
::nlohmann::json* j) const {
return JsonProtoBinderBase::operator()(std::true_type{}, options, obj, j);
}
inline absl::Status operator()(std::false_type /*is_loading*/,
const NoOptions& options,
const MessageType* obj,
::nlohmann::json* j) const {
return JsonProtoBinderBase::operator()(std::false_type{}, options, obj, j);
}
};
template <typename MessageType>
struct AsciiProtoBinder : private AsciiProtoBinderBase {
inline absl::Status operator()(std::true_type /*is_loading*/,
const NoOptions& options, MessageType* obj,
::nlohmann::json* j) const {
return AsciiProtoBinderBase::operator()(std::true_type{}, options, obj, j);
}
inline absl::Status operator()(std::false_type /*is_loading*/,
const NoOptions& options,
const MessageType* obj,
::nlohmann::json* j) const {
return AsciiProtoBinderBase::operator()(std::false_type{}, options, obj, j);
}
};
}
}
#endif
#include "tensorstore/proto/proto_binder.h"
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "google/protobuf/util/json_util.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
absl::Status JsonProtoBinderBase::operator()(std::true_type,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
if (!j->template get_ptr<::nlohmann::json::object_t*>()) {
return internal_json::ExpectedError(*j, "object");
}
std::string json_ascii = j->dump();
auto status = google::protobuf::util::JsonStringToMessage(json_ascii, obj);
if (status.ok()) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected JSON protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", j->dump(), " with error ",
std::string_view(status.message().data(), status.message().size())));
}
absl::Status JsonProtoBinderBase::operator()(std::false_type,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string json_ascii;
auto status = google::protobuf::util::MessageToJsonString(*obj, &json_ascii);
if (!status.ok()) {
return absl::InternalError(
std::string_view(status.message().data(), status.message().size()));
}
auto j_parse = ::nlohmann::json::parse(json_ascii, nullptr, false);
if (j_parse.template get_ptr<::nlohmann::json::object_t*>()) {
*j = std::move(j_parse);
return absl::OkStatus();
}
return absl::InternalError("Failed to serialize field as JSON proto");
}
absl::Status AsciiProtoBinderBase::operator()(std::true_type,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
auto* str = j->template get_ptr<const std::string*>();
if (!str) {
return internal_json::ExpectedError(*j, "string");
}
if (TryParseTextProto(*str, obj)) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ASCII protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", *str));
}
absl::Status AsciiProtoBinderBase::operator()(std::false_type,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string obj_text;
google::protobuf::TextFormat::PrintToString(*obj, &obj_text);
*j = obj_text;
return absl::OkStatus();
}
}
} | #include "tensorstore/proto/proto_binder.h"
#include <string>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::JsonSerializationOptions;
using ::tensorstore::internal_json_binding::AsciiProtoBinder;
using ::tensorstore::internal_json_binding::JsonProtoBinder;
static inline constexpr JsonProtoBinder<::tensorstore::proto::Array>
ArrayJsonBinder = {};
static inline constexpr AsciiProtoBinder<::tensorstore::proto::Array>
ArrayAsciiBinder = {};
constexpr const char kProto[] = R"(dtype: "int64"
shape: 1
shape: 2
shape: 4
int_data: 1
int_data: 0
int_data: 2
int_data: 2
int_data: 4
int_data: 5
int_data: 6
int_data: 7
)";
TEST(ProtoBinderTest, Ascii) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = std::string(kProto);
EXPECT_TRUE(ArrayAsciiBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayAsciiBinder(std::false_type{}, options, &proto, &out).ok());
ASSERT_TRUE(out.get_ptr<const std::string*>());
EXPECT_EQ(*out.get_ptr<const std::string*>(), kProto);
}
TEST(ProtoBinderTest, Json) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = ::nlohmann::json{{"dtype", "int64"},
{"shape", {1, 2, 4}},
{"int_data", {1, 0, 2, 2, 4, 5, 6, 7}}};
EXPECT_TRUE(ArrayJsonBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayJsonBinder(std::false_type{}, options, &proto, &out).ok());
::nlohmann::json expected{
{"dtype", "int64"},
{"shape", {"1", "2", "4"}},
{"intData", {"1", "0", "2", "2", "4", "5", "6", "7"}}};
EXPECT_EQ(out, expected);
}
} |
701 | cpp | google/tensorstore | function | tensorstore/serialization/function.cc | tensorstore/serialization/function_test.cc | #ifndef TENSORSTORE_SERIALIZATION_FUNCTION_H_
#define TENSORSTORE_SERIALIZATION_FUNCTION_H_
#include <cassert>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_tuple.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
struct RegisteredSerializableFunction;
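// Type-erased, reference-counted representation of a serializable function;
// concrete instances are created by SerializableFunctionImpl below.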
class SerializableFunctionBase
: public internal::AtomicReferenceCount<SerializableFunctionBase> {
public:
using Ptr = internal::IntrusivePtr<SerializableFunctionBase>;
using ErasedFunction = void (*)();
virtual ~SerializableFunctionBase();
virtual bool Encode(EncodeSink& sink) const = 0;
virtual ErasedFunction erased_function() const = 0;
virtual void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const = 0;
};
void RegisterSerializableFunction(const RegisteredSerializableFunction& r);
struct RegisteredSerializableFunction {
using ErasedFunction = SerializableFunctionBase::ErasedFunction;
using DecodeFunction = bool (*)(DecodeSource& source,
SerializableFunctionBase::Ptr& impl);
RegisteredSerializableFunction(const std::type_info* signature,
std::string_view id, DecodeFunction decode)
: signature(signature), id(id), decode(decode) {
RegisterSerializableFunction(*this);
}
RegisteredSerializableFunction(const RegisteredSerializableFunction&) =
delete;
RegisteredSerializableFunction& operator=(
const RegisteredSerializableFunction&) = delete;
using Key = std::pair<std::type_index, std::string_view>;
Key key() const { return {*signature, id}; }
const std::type_info* signature;
std::string_view id;
DecodeFunction decode;
};
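// A function type is keyed in the registry by its static `id` member if it
// defines one, and otherwise by its typeid name.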
template <typename T, typename SFINAE = void>
constexpr inline bool HasId = false;
template <typename T>
constexpr inline bool HasId<T, std::void_t<decltype(T::id)>> = true;
template <typename T>
constexpr std::string_view GetFunctionId() {
if constexpr (HasId<T>) {
return T::id;
} else {
return typeid(T).name();
}
}
template <typename T, typename R, typename... Arg>
class SerializableFunctionImpl : public SerializableFunctionBase {
public:
SerializableFunctionImpl() = default;
SerializableFunctionImpl(T&& func) : func_(std::move(func)) {}
static R Invoke(const SerializableFunctionBase& base_impl, Arg... arg) {
return static_cast<const SerializableFunctionImpl&>(base_impl).func_(
std::forward<Arg>(arg)...);
}
static bool Decode(DecodeSource& source,
SerializableFunctionBase::Ptr& impl) {
impl.reset(new SerializableFunctionImpl);
return serialization::Decode(
source, static_cast<SerializableFunctionImpl&>(*impl).func_);
}
bool Encode(EncodeSink& sink) const final {
return serialization::EncodeTuple(sink, registry_entry_.id, func_);
}
ErasedFunction erased_function() const final {
return reinterpret_cast<ErasedFunction>(&SerializableFunctionImpl::Invoke);
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final {
garbage_collection::GarbageCollectionVisit(visitor, func_);
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
internal::DefaultConstructibleFunctionIfEmpty<T> func_;
static inline const RegisteredSerializableFunction registry_entry_{
&typeid(R(*)(Arg...)),
GetFunctionId<T>(),
&SerializableFunctionImpl::Decode,
};
};
class NonSerializableFunctionBase : public SerializableFunctionBase {
bool Encode(EncodeSink& sink) const final;
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final;
};
template <typename T, typename R, typename... Arg>
class NonSerializableFunctionImpl : public NonSerializableFunctionBase {
public:
NonSerializableFunctionImpl(T&& func) : func_(std::move(func)) {}
static R Invoke(const SerializableFunctionBase& base_impl, Arg... arg) {
return static_cast<const NonSerializableFunctionImpl&>(base_impl).func_(
std::forward<Arg>(arg)...);
}
ErasedFunction erased_function() const override {
return reinterpret_cast<ErasedFunction>(
&NonSerializableFunctionImpl::Invoke);
}
private:
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
internal::DefaultConstructibleFunctionIfEmpty<T> func_;
};
bool DecodeSerializableFunction(DecodeSource& source,
SerializableFunctionBase::Ptr& value,
const std::type_info& signature);
}
template <typename R, typename Func, typename... Arg>
constexpr inline bool IsSerializableFunctionLike =
std::is_invocable_r_v<R, const Func&, Arg...> &&
(IsSerializable<Func> || std::is_empty_v<Func> ||
IsNonSerializableLike<Func>);
template <typename Signature>
class SerializableFunction;
template <typename R, typename... Arg>
class SerializableFunction<R(Arg...)> {
public:
SerializableFunction() = default;
template <typename Func,
std::enable_if_t<(std::is_invocable_r_v<R, const Func&, Arg...> &&
!IsNonSerializableLike<Func> &&
(IsSerializable<Func> ||
std::is_empty_v<Func>))>* = nullptr>
SerializableFunction(Func func)
: impl_(new internal_serialization::SerializableFunctionImpl<Func, R,
Arg...>(
std::move(func))) {}
template <typename Func,
std::enable_if_t<(std::is_invocable_r_v<R, const Func&, Arg...> &&
IsNonSerializableLike<Func>)>* = nullptr>
SerializableFunction(Func func)
: impl_(new internal_serialization::NonSerializableFunctionImpl<Func, R,
Arg...>(
std::move(func))) {}
explicit operator bool() const { return static_cast<bool>(impl_); }
R operator()(Arg... arg) const {
assert(impl_);
auto function = impl_->erased_function();
return reinterpret_cast<R (*)(
const internal_serialization::SerializableFunctionBase&, Arg...)>(
function)(*impl_, std::forward<Arg>(arg)...);
}
private:
internal_serialization::SerializableFunctionBase::Ptr impl_;
friend struct Serializer<SerializableFunction<R(Arg...)>>;
friend struct garbage_collection::GarbageCollection<
SerializableFunction<R(Arg...)>>;
};
template <typename R, typename... Arg>
struct Serializer<SerializableFunction<R(Arg...)>> {
[[nodiscard]] static bool Encode(
EncodeSink& sink, const SerializableFunction<R(Arg...)>& value) {
return value.impl_->Encode(sink);
}
[[nodiscard]] static bool Decode(DecodeSource& source,
SerializableFunction<R(Arg...)>& value) {
return internal_serialization::DecodeSerializableFunction(
source, value.impl_, typeid(R(*)(Arg...)));
}
};
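// Stores a function together with leading bound arguments; invoking it
// prepends the bound arguments to the call, and ApplyMembers exposes both
// members so the whole object can be serialized.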
template <typename Func, typename... BoundArg>
class BindFront {
public:
BindFront() = default;
BindFront(const Func& func, const BoundArg&... bound_arg)
: func_(func), bound_args_(bound_arg...) {}
template <typename... Arg>
decltype(std::declval<const Func&>()(std::declval<const BoundArg&>()...,
std::declval<Arg>()...))
operator()(Arg&&... arg) const {
return std::apply(
[&](const auto&... bound_arg) {
return func_(bound_arg..., std::forward<Arg>(arg)...);
},
bound_args_);
}
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.func_, x.bound_args_);
};
private:
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
internal::DefaultConstructibleFunctionIfEmpty<Func> func_;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
std::tuple<internal::DefaultConstructibleFunctionIfEmpty<BoundArg>...>
bound_args_;
};
template <typename Func, typename... BoundArg>
BindFront(const Func& func,
const BoundArg&... bound_arg) -> BindFront<Func, BoundArg...>;
template <typename Func, typename... BoundArg>
class BindBack {
public:
BindBack() = default;
BindBack(const Func& func, const BoundArg&... bound_arg)
: func_(func), bound_args_(bound_arg...) {}
template <typename... Arg>
std::invoke_result_t<const Func&, Arg..., const BoundArg&...> operator()(
Arg&&... arg) const {
return std::apply(
[&](const auto&... bound_arg) {
return func_(std::forward<Arg>(arg)..., bound_arg...);
},
bound_args_);
}
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.func_, x.bound_args_);
};
private:
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
internal::DefaultConstructibleFunctionIfEmpty<Func> func_;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
std::tuple<internal::DefaultConstructibleFunctionIfEmpty<BoundArg>...>
bound_args_;
};
template <typename Func, typename... BoundArg>
BindBack(const Func& func,
const BoundArg&... bound_arg) -> BindBack<Func, BoundArg...>;
}
using serialization::NonSerializable;
using serialization::SerializableFunction;
namespace garbage_collection {
template <typename R, typename... Arg>
struct GarbageCollection<serialization::SerializableFunction<R(Arg...)>> {
static void Visit(
GarbageCollectionVisitor& visitor,
const serialization::SerializableFunction<R(Arg...)>& value) {
if (!value) return;
value.impl_->GarbageCollectionVisit(visitor);
}
};
}
}
#endif
#include "tensorstore/serialization/function.h"
#include <string_view>
#include <typeinfo>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
bool NonSerializableFunctionBase::Encode(EncodeSink& sink) const {
sink.Fail(internal_serialization::NonSerializableError());
return false;
}
void NonSerializableFunctionBase::GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const {
}
using SerializableFunctionRegistry =
internal::HeterogeneousHashSet<const RegisteredSerializableFunction*,
RegisteredSerializableFunction::Key,
&RegisteredSerializableFunction::key>;
SerializableFunctionRegistry& GetSerializableFunctionRegistry() {
static absl::NoDestructor<SerializableFunctionRegistry> registry;
return *registry;
}
void RegisterSerializableFunction(const RegisteredSerializableFunction& r) {
if (!GetSerializableFunctionRegistry().insert(&r).second) {
ABSL_LOG(FATAL) << "Duplicate SerializableFunction registration: id="
<< r.id << ", signature=" << r.signature->name();
}
}
SerializableFunctionBase::~SerializableFunctionBase() = default;
bool DecodeSerializableFunction(DecodeSource& source,
SerializableFunctionBase::Ptr& value,
const std::type_info& signature) {
std::string_view id;
if (!serialization::Decode(source, id)) return false;
auto& registry = GetSerializableFunctionRegistry();
auto it = registry.find(RegisteredSerializableFunction::Key(signature, id));
if (it == registry.end()) {
source.Fail(absl::DataLossError(
tensorstore::StrCat("SerializableFunction not registered: ", id)));
return false;
}
return (*it)->decode(source, value);
}
}
}
} | #include "tensorstore/serialization/function.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::serialization::BindFront;
using ::tensorstore::serialization::NonSerializable;
using ::tensorstore::serialization::SerializableFunction;
using ::tensorstore::serialization::SerializationRoundTrip;
TEST(SerializationTest, Function) {
SerializableFunction<int()> func([] { return 3; });
EXPECT_EQ(3, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(3, func_decoded());
}
TEST(SerializationTest, BindFront) {
SerializableFunction<int()> func =
BindFront([](int a, int b) { return a + b; }, 2, 5);
EXPECT_EQ(7, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(7, func_decoded());
}
TEST(SerializationTest, NonSerializable) {
SerializableFunction<int()> func = NonSerializable{[y = 5] { return y; }};
EXPECT_EQ(5, func());
EXPECT_THAT(SerializationRoundTrip(func),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Serialization not supported.*"));
}
struct FunctionWithId1 {
constexpr static const char id[] = "my_test_function1";
int operator()() const { return 1; }
};
struct FunctionWithId2 {
constexpr static const char id[] = "my_test_function2";
int operator()() const { return 2; }
};
TEST(SerializationTest, Id) {
SerializableFunction<int()> func1 = FunctionWithId1{};
SerializableFunction<int()> func2 = FunctionWithId2{};
EXPECT_EQ(1, func1());
EXPECT_EQ(2, func2());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func1_copy,
SerializationRoundTrip(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func2_copy,
SerializationRoundTrip(func2));
EXPECT_EQ(1, func1_copy());
EXPECT_EQ(2, func2_copy());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto func1_encoded, tensorstore::serialization::EncodeBatch(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_func1_encoded,
tensorstore::serialization::EncodeBatch(
std::string_view(FunctionWithId1::id)));
EXPECT_EQ(expected_func1_encoded, func1_encoded);
}
} |
702 | cpp | google/tensorstore | absl_time | tensorstore/serialization/absl_time.cc | tensorstore/serialization/absl_time_test.cc | #ifndef TENSORSTORE_SERIALIZATION_ABSL_TIME_H_
#define TENSORSTORE_SERIALIZATION_ABSL_TIME_H_
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorstore/serialization/fwd.h"
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(absl::Duration)
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(absl::Time)
#endif
#include "tensorstore/serialization/absl_time.h"
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
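// absl::Duration is encoded via its internal (rep_hi, rep_lo) representation,
// and absl::Time as the Duration offset from the Unix epoch.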
bool Serializer<absl::Duration>::Encode(EncodeSink& sink,
const absl::Duration& value) {
int64_t rep_hi = absl::time_internal::GetRepHi(value);
uint32_t rep_lo = absl::time_internal::GetRepLo(value);
return serialization::EncodeTuple(sink, rep_hi, rep_lo);
}
bool Serializer<absl::Duration>::Decode(DecodeSource& source,
absl::Duration& value) {
int64_t rep_hi;
uint32_t rep_lo;
using absl::time_internal::kTicksPerSecond;
if (!serialization::DecodeTuple(source, rep_hi, rep_lo)) return false;
if (rep_lo >= kTicksPerSecond &&
(rep_lo != std::numeric_limits<uint32_t>::max() ||
(rep_hi != std::numeric_limits<int64_t>::min() &&
rep_hi != std::numeric_limits<int64_t>::max()))) {
source.Fail(serialization::DecodeError("Invalid time representation"));
return false;
}
value = absl::time_internal::MakeDuration(rep_hi, rep_lo);
return true;
}
bool Serializer<absl::Time>::Encode(EncodeSink& sink, const absl::Time& value) {
return serialization::Encode(sink, value - absl::UnixEpoch());
}
bool Serializer<absl::Time>::Decode(DecodeSource& source, absl::Time& value) {
absl::Duration d;
if (!serialization::Decode(source, d)) return false;
value = absl::UnixEpoch() + d;
return true;
}
}
} | #include "tensorstore/serialization/absl_time.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(DurationTest, SerializationRoundTrip) {
TestSerializationRoundTrip(absl::Seconds(1));
TestSerializationRoundTrip(-absl::Seconds(1));
TestSerializationRoundTrip(absl::Seconds(10));
TestSerializationRoundTrip(-absl::Seconds(10));
TestSerializationRoundTrip(absl::Nanoseconds(5000000000));
TestSerializationRoundTrip(-absl::Nanoseconds(5000000000));
TestSerializationRoundTrip(absl::InfiniteDuration());
TestSerializationRoundTrip(-absl::InfiniteDuration());
}
TEST(TimeTest, SerializationRoundTrip) {
TestSerializationRoundTrip(absl::Seconds(1) + absl::UnixEpoch());
TestSerializationRoundTrip(-absl::Seconds(1) + absl::UnixEpoch());
TestSerializationRoundTrip(absl::Seconds(10) + absl::UnixEpoch());
TestSerializationRoundTrip(-absl::Seconds(10) + absl::UnixEpoch());
TestSerializationRoundTrip(absl::Nanoseconds(5000000000) + absl::UnixEpoch());
TestSerializationRoundTrip(-absl::Nanoseconds(5000000000) +
absl::UnixEpoch());
TestSerializationRoundTrip(absl::InfiniteFuture());
TestSerializationRoundTrip(absl::InfinitePast());
}
} |
703 | cpp | google/tensorstore | constant_vector | tensorstore/util/constant_vector.cc | tensorstore/util/constant_vector_test.cc | #ifndef TENSORSTORE_UTIL_CONSTANT_VECTOR_H_
#define TENSORSTORE_UTIL_CONSTANT_VECTOR_H_
#include <array>
#include <cassert>
#include <cstddef>
#include <string>
#include <type_traits>
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_constant_vector {
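// Backing storage for GetConstantVector: a kMaxRank-length array filled with
// the given value, instantiated once per (T, Value) pair as kConstantArray.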
template <typename T>
constexpr std::array<T, kMaxRank> GetConstantArray(T value) {
std::array<T, kMaxRank> array = {};
for (auto& x : array) x = value;
return array;
}
template <typename T, T Value>
constexpr inline std::array<T, kMaxRank> kConstantArray =
GetConstantArray(Value);
extern const std::string kStringArray[kMaxRank];
}
template <typename T, T Value>
constexpr span<const T> GetConstantVector(std::ptrdiff_t length) {
assert(IsValidRank(length));
return {internal_constant_vector::kConstantArray<T, Value>.data(), length};
}
template <typename T, T Value>
constexpr const T* GetConstantVector() {
return internal_constant_vector::kConstantArray<T, Value>.data();
}
template <typename T, T Value, std::ptrdiff_t Length>
constexpr span<const T, Length> GetConstantVector(
std::integral_constant<std::ptrdiff_t, Length> = {}) {
static_assert(IsValidRank(Length));
return {internal_constant_vector::kConstantArray<T, Value>.data(), Length};
}
inline constexpr span<const std::string> GetDefaultStringVector(
std::ptrdiff_t length) {
assert(IsValidRank(length));
return {internal_constant_vector::kStringArray, length};
}
inline constexpr const std::string* GetDefaultStringVector() {
return internal_constant_vector::kStringArray;
}
template <std::ptrdiff_t Length>
inline constexpr span<const std::string, Length> GetDefaultStringVector(
std::integral_constant<std::ptrdiff_t, Length> = {}) {
static_assert(IsValidRank(Length));
return {internal_constant_vector::kStringArray, Length};
}
}
#endif
#include "tensorstore/util/constant_vector.h"
#include <string>
#include "tensorstore/rank.h"
namespace tensorstore {
namespace internal_constant_vector {
const std::string kStringArray[kMaxRank] = {};
}
} | #include "tensorstore/util/constant_vector.h"
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::GetConstantVector;
using ::tensorstore::span;
TEST(GetConstantVectorTest, RunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(5);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, ZeroRunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(0);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_EQ(0, x.size());
}
TEST(GetConstantVectorTest, StaticLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 5>();
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticLengthIntUsingStaticRankValue) {
constexpr auto x = GetConstantVector<int, 3>(tensorstore::StaticRank<5>{});
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticZeroLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 0>();
static_assert(std::is_same_v<decltype(x), const span<const int, 0>>);
}
TEST(GetDefaultStringVectorTest, StaticLength) {
auto x = tensorstore::GetDefaultStringVector<2>();
static_assert(std::is_same_v<decltype(x), span<const std::string, 2>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
TEST(GetDefaultStringVectorTest, DynamicLength) {
auto x = tensorstore::GetDefaultStringVector(2);
static_assert(std::is_same_v<decltype(x), span<const std::string>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
} |
704 | cpp | google/tensorstore | quote_string | tensorstore/util/quote_string.cc | tensorstore/util/quote_string_test.cc | #ifndef TENSORSTORE_UTIL_QUOTE_STRING_H_
#define TENSORSTORE_UTIL_QUOTE_STRING_H_
#include <string>
#include <string_view>
namespace tensorstore {
std::string QuoteString(std::string_view s);
}
#endif
#include "tensorstore/util/quote_string.h"
#include <string>
#include <string_view>
#include "absl/strings/escaping.h"
namespace tensorstore {
std::string QuoteString(std::string_view s) {
return '"' + absl::CHexEscape(absl::string_view(s.data(), s.size())) + '"';
}
} | #include "tensorstore/util/quote_string.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::QuoteString;
TEST(QuoteStringTest, Basic) {
EXPECT_EQ("\"abc \"", QuoteString("abc "));
EXPECT_EQ("\"a\\\"b\\n\\x01\"", QuoteString("a\"b\n\x01"));
EXPECT_EQ("\"\\'\"", QuoteString("'"));
}
} |
705 | cpp | google/tensorstore | utf8_string | tensorstore/util/utf8_string.cc | tensorstore/util/utf8_string_test.cc | #ifndef TENSORSTORE_UTIL_UTF8_STRING_H_
#define TENSORSTORE_UTIL_UTF8_STRING_H_
#include <ostream>
#include <string>
#include "tensorstore/serialization/fwd.h"
namespace tensorstore {
struct Utf8String {
std::string utf8;
friend bool operator<(const Utf8String& a, const Utf8String& b) {
return a.utf8 < b.utf8;
}
friend bool operator<=(const Utf8String& a, const Utf8String& b) {
return a.utf8 <= b.utf8;
}
friend bool operator>(const Utf8String& a, const Utf8String& b) {
return a.utf8 > b.utf8;
}
friend bool operator>=(const Utf8String& a, const Utf8String& b) {
return a.utf8 >= b.utf8;
}
friend bool operator==(const Utf8String& a, const Utf8String& b) {
return a.utf8 == b.utf8;
}
friend bool operator!=(const Utf8String& a, const Utf8String& b) {
return a.utf8 != b.utf8;
}
friend std::ostream& operator<<(std::ostream& os, const Utf8String& s) {
return os << s.utf8;
}
static constexpr auto ApplyMembers = [](auto&& x, auto f) {
return f(x.utf8);
};
};
static_assert(sizeof(Utf8String) == sizeof(std::string), "");
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::Utf8String)
#endif
#include "tensorstore/util/utf8_string.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/internal/utf8.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
bool Serializer<Utf8String>::Encode(EncodeSink& sink, const Utf8String& value) {
return serialization::WriteDelimited(sink.writer(), value.utf8);
}
bool Serializer<Utf8String>::Decode(DecodeSource& source, Utf8String& value) {
return serialization::ReadDelimitedUtf8(source.reader(), value.utf8);
}
}
} | #include "tensorstore/util/utf8_string.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Utf8String;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Valid) {
TestSerializationRoundTrip(Utf8String{""});
TestSerializationRoundTrip(Utf8String{"abc"});
TestSerializationRoundTrip(Utf8String{"\xc2\x80hello\xc2\xbf"});
}
TEST(SerializationTest, Invalid) {
EXPECT_THAT(SerializationRoundTrip(Utf8String{"\xC1"}),
MatchesStatus(absl::StatusCode::kDataLoss,
"String is not valid utf-8: .*"));
}
} |
706 | cpp | google/tensorstore | status_testutil | tensorstore/util/status_testutil.cc | tensorstore/util/status_testutil_test.cc | #ifndef TENSORSTORE_UTIL_STATUS_TESTUTIL_H_
#define TENSORSTORE_UTIL_STATUS_TESTUTIL_H_
#include <ostream>
#include <string>
#include <system_error>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
template <typename T>
void PrintTo(const Result<T>& result, std::ostream* os) {
if (result) {
if constexpr (std::is_void_v<T>) {
*os << "Result{}";
} else {
*os << "Result{" << ::testing::PrintToString(*result) << "}";
}
} else {
*os << result.status();
}
}
template <typename T>
void PrintTo(const Future<T>& future, std::ostream* os) {
if (!future.ready()) {
*os << "Future{<not ready>}";
return;
}
if (future.status().ok()) {
if constexpr (std::is_void_v<T>) {
*os << "Future{}";
} else {
*os << "Future{" << ::testing::PrintToString(future.value()) << "}";
}
} else {
*os << future.status();
}
}
namespace internal_status {
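// gMock matcher implementations behind IsOk, IsOkAndHolds, and StatusIs; they
// accept absl::Status, Result<T>, and Future<T> via tensorstore::GetStatus.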
template <typename StatusType>
class IsOkAndHoldsMatcherImpl : public ::testing::MatcherInterface<StatusType> {
public:
typedef
typename std::remove_reference<StatusType>::type::value_type value_type;
template <typename InnerMatcher>
explicit IsOkAndHoldsMatcherImpl(InnerMatcher&& inner_matcher)
: inner_matcher_(::testing::SafeMatcherCast<const value_type&>(
std::forward<InnerMatcher>(inner_matcher))) {}
void DescribeTo(std::ostream* os) const override {
*os << "is OK and has a value that ";
inner_matcher_.DescribeTo(os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "isn't OK or has a value that ";
inner_matcher_.DescribeNegationTo(os);
}
bool MatchAndExplain(
StatusType actual_value,
::testing::MatchResultListener* result_listener) const override {
auto status = ::tensorstore::GetStatus(actual_value);
if (!status.ok()) {
*result_listener << "whose status code is "
<< absl::StatusCodeToString(status.code());
return false;
}
::testing::StringMatchResultListener inner_listener;
if (!inner_matcher_.MatchAndExplain(actual_value.value(),
&inner_listener)) {
*result_listener << "whose value "
<< ::testing::PrintToString(actual_value.value())
<< " doesn't match";
if (!inner_listener.str().empty()) {
*result_listener << ", " << inner_listener.str();
}
return false;
}
return true;
}
private:
const ::testing::Matcher<const value_type&> inner_matcher_;
};
template <typename InnerMatcher>
class IsOkAndHoldsMatcher {
public:
explicit IsOkAndHoldsMatcher(InnerMatcher inner_matcher)
: inner_matcher_(std::move(inner_matcher)) {}
template <typename StatusType>
operator ::testing::Matcher<StatusType>() const {
return ::testing::Matcher<StatusType>(
new IsOkAndHoldsMatcherImpl<const StatusType&>(inner_matcher_));
}
private:
const InnerMatcher inner_matcher_;
};
template <typename StatusType>
class MonoIsOkMatcherImpl : public ::testing::MatcherInterface<StatusType> {
public:
void DescribeTo(std::ostream* os) const override { *os << "is OK"; }
void DescribeNegationTo(std::ostream* os) const override {
*os << "is not OK";
}
bool MatchAndExplain(
StatusType actual_value,
::testing::MatchResultListener* result_listener) const override {
return ::tensorstore::GetStatus(actual_value).ok();
}
};
class IsOkMatcher {
public:
template <typename StatusType>
operator ::testing::Matcher<StatusType>() const {
return ::testing::Matcher<StatusType>(
new MonoIsOkMatcherImpl<const StatusType&>());
}
};
template <typename StatusType>
class StatusIsMatcherImpl : public ::testing::MatcherInterface<StatusType> {
public:
explicit StatusIsMatcherImpl(
testing::Matcher<absl::StatusCode> code_matcher,
testing::Matcher<const std::string&> message_matcher)
: code_matcher_(std::move(code_matcher)),
message_matcher_(std::move(message_matcher)) {}
void DescribeTo(std::ostream* os) const override {
*os << "has a status code that ";
code_matcher_.DescribeTo(os);
*os << ", and has an error message that ";
message_matcher_.DescribeTo(os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "has a status code that ";
code_matcher_.DescribeNegationTo(os);
*os << ", or has an error message that ";
message_matcher_.DescribeNegationTo(os);
}
bool MatchAndExplain(
StatusType actual_value,
::testing::MatchResultListener* result_listener) const override {
auto status = ::tensorstore::GetStatus(actual_value);
testing::StringMatchResultListener inner_listener;
if (!code_matcher_.MatchAndExplain(status.code(), &inner_listener)) {
*result_listener << "whose status code "
<< absl::StatusCodeToString(status.code())
<< " doesn't match";
const std::string inner_explanation = inner_listener.str();
if (!inner_explanation.empty()) {
*result_listener << ", " << inner_explanation;
}
return false;
}
if (!message_matcher_.Matches(std::string(status.message()))) {
*result_listener << "whose error message is wrong";
return false;
}
return true;
}
private:
const testing::Matcher<absl::StatusCode> code_matcher_;
const testing::Matcher<const std::string&> message_matcher_;
};
class StatusIsMatcher {
public:
StatusIsMatcher(testing::Matcher<absl::StatusCode> code_matcher,
testing::Matcher<const std::string&> message_matcher)
: code_matcher_(std::move(code_matcher)),
message_matcher_(std::move(message_matcher)) {}
template <typename StatusType>
operator ::testing::Matcher<StatusType>() const {
return ::testing::Matcher<StatusType>(
new StatusIsMatcherImpl<const StatusType&>(code_matcher_,
message_matcher_));
}
private:
const testing::Matcher<absl::StatusCode> code_matcher_;
const testing::Matcher<const std::string&> message_matcher_;
};
}
inline internal_status::IsOkMatcher IsOk() {
return internal_status::IsOkMatcher();
}
template <typename InnerMatcher>
internal_status::IsOkAndHoldsMatcher<typename std::decay<InnerMatcher>::type>
IsOkAndHolds(InnerMatcher&& inner_matcher) {
return internal_status::IsOkAndHoldsMatcher<
typename std::decay<InnerMatcher>::type>(
std::forward<InnerMatcher>(inner_matcher));
}
template <typename CodeMatcher, typename MessageMatcher>
internal_status::StatusIsMatcher StatusIs(CodeMatcher code_matcher,
MessageMatcher message_matcher) {
return internal_status::StatusIsMatcher(std::move(code_matcher),
std::move(message_matcher));
}
template <typename CodeMatcher>
internal_status::StatusIsMatcher StatusIs(CodeMatcher code_matcher) {
return internal_status::StatusIsMatcher(std::move(code_matcher),
::testing::_);
}
inline internal_status::StatusIsMatcher MatchesStatus(
absl::StatusCode status_code) {
return internal_status::StatusIsMatcher(status_code, ::testing::_);
}
internal_status::StatusIsMatcher MatchesStatus(
absl::StatusCode status_code, const std::string& message_pattern);
}
#define TENSORSTORE_EXPECT_OK(expr) EXPECT_THAT(expr, ::tensorstore::IsOk())
#define TENSORSTORE_ASSERT_OK(expr) ASSERT_THAT(expr, ::tensorstore::IsOk())
#define TENSORSTORE_ASSERT_OK_AND_ASSIGN(decl, expr) \
TENSORSTORE_ASSIGN_OR_RETURN(decl, expr, \
([&] { FAIL() << #expr << ": " << _; })())
#endif
#include "tensorstore/util/status_testutil.h"
#include <ostream>
#include <regex>
#include <string>
#include <system_error>
#include <gmock/gmock.h>
#include "absl/status/status.h"
namespace tensorstore {
namespace internal_status {
namespace {
template <typename StringType>
class RegexMatchImpl : public ::testing::MatcherInterface<StringType> {
public:
RegexMatchImpl(const std::string& message_pattern)
: message_pattern_(message_pattern) {}
void DescribeTo(std::ostream* os) const override {
*os << "message matches pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "message doesn't match pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
bool MatchAndExplain(
StringType message,
::testing::MatchResultListener* result_listener) const override {
return std::regex_match(message, std::regex(message_pattern_));
}
private:
const std::string message_pattern_;
};
}
}
internal_status::StatusIsMatcher MatchesStatus(
absl::StatusCode status_code, const std::string& message_pattern) {
return internal_status::StatusIsMatcher(
status_code, ::testing::Matcher<const std::string&>(
new internal_status::RegexMatchImpl<const std::string&>(
message_pattern)));
}
} | #include "tensorstore/util/status_testutil.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::Result;
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(StatusTestutilTest, IsOk) {
EXPECT_THAT([]() -> Future<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT([]() -> Result<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT(absl::OkStatus(), ::tensorstore::IsOk());
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOk());
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOk());
EXPECT_THAT(absl::InternalError(""), ::testing::Not(::tensorstore::IsOk()));
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::InternalError("")),
testing::IsEmpty());
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::OkStatus()),
testing::IsEmpty());
TENSORSTORE_EXPECT_OK(absl::OkStatus());
TENSORSTORE_ASSERT_OK(absl::OkStatus());
TENSORSTORE_EXPECT_OK([]() -> Future<void> { return absl::OkStatus(); }());
TENSORSTORE_ASSERT_OK([]() -> Result<void> { return absl::OkStatus(); }());
}
TEST(StatusTestutilTest, Optional) {
EXPECT_THAT(Result<int>{1}, ::testing::Optional(1));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(Result<int>{1}, ::testing::Optional(::testing::_));
EXPECT_THAT(Result<int>{2}, ::testing::Optional(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(
Explain(::testing::Optional(1), Result<int>(absl::InternalError(""))),
testing::HasSubstr("which is not engaged"));
EXPECT_THAT(Explain(::testing::Optional(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, IsOkAndHolds) {
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(1));
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOkAndHolds(2));
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(::testing::_));
EXPECT_THAT(Result<int>{2}, ::tensorstore::IsOkAndHolds(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::tensorstore::IsOkAndHolds(1)));
int result;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(result, []() -> Result<int> { return 2; }());
EXPECT_EQ(2, result);
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1),
Result<int>(absl::InternalError(""))),
testing::HasSubstr("whose status code is INTERNAL"));
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, StatusIs) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(
absl::OkStatus(),
::testing::Not(::tensorstore::StatusIs(absl::StatusCode::kInternal)));
EXPECT_THAT(absl::OkStatus(), ::tensorstore::StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(Explain(::tensorstore::StatusIs(absl::StatusCode::kOk),
absl::InternalError("")),
testing::HasSubstr("whose status code INTERNAL doesn't match"));
}
TEST(StatusTestutilTest, StatusIs_WithMessage) {
EXPECT_THAT(
Result<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
Future<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
absl::InternalError("strongbad"),
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(absl::InternalError("strongbad"),
::tensorstore::StatusIs(
::testing::_, ::testing::Not(::testing::HasSubstr("good"))));
EXPECT_THAT(
absl::Status{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::Not(absl::StatusCode::kAborted),
::testing::Not(::testing::HasSubstr("good"))));
}
TEST(StatusTestutilTest, MatchesStatus) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::OkStatus(),
::tensorstore::MatchesStatus(absl::StatusCode::kOk));
}
TEST(StatusTestutilTest, MatchesStatus_Pattern) {
EXPECT_THAT(Result<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(Future<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kInternal, "b")));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kCancelled, "a")));
}
} |
707 | cpp | google/tensorstore | element_pointer | tensorstore/util/element_pointer.cc | tensorstore/util/element_pointer_test.cc | #ifndef TENSORSTORE_UTIL_ELEMENT_POINTER_H_
#define TENSORSTORE_UTIL_ELEMENT_POINTER_H_
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/container/compressed_pair.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_traits.h"
namespace tensorstore {
template <typename Element>
class Shared;
template <typename T>
constexpr inline bool IsShared = false;
template <typename T>
constexpr inline bool IsShared<Shared<T>> = true;
template <typename T>
constexpr inline bool IsShared<const Shared<T>> = true;
template <typename T>
constexpr inline bool IsElementTag = (IsElementType<T> &&
!IsShared<T>);
template <typename T>
constexpr inline bool IsElementTag<Shared<T>> = (IsElementType<T> &&
!IsShared<T>);
template <typename T>
struct ElementTagTraits {
static_assert(IsElementTag<T>, "T must be an ElementTag type.");
using Element = T;
using Pointer = T*;
template <typename U>
using rebind = U;
};
template <typename T>
struct ElementTagTraits<Shared<T>> {
static_assert(IsElementTag<Shared<T>>,
"Shared<T> must be an ElementTag type.");
using Element = T;
using Pointer = std::shared_ptr<T>;
template <typename U>
using rebind = Shared<U>;
};
namespace internal_element_pointer {
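// Maps a raw pointer type (T*) or std::shared_ptr<T> back to its element tag
// (T or Shared<T>, respectively).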
template <typename T>
struct PointerElementTagType;
template <typename T>
struct PointerElementTagType<T*> {
using type = T;
};
template <typename T>
struct PointerElementTagType<std::shared_ptr<T>> {
using type = Shared<T>;
};
}
template <typename T>
using PointerElementTag =
typename internal_element_pointer::PointerElementTagType<T>::type;
template <typename SourcePointer, typename TargetPointer>
constexpr inline bool IsArrayBasePointerConvertible = false;
template <typename SourceElement, typename TargetElement>
constexpr inline bool
IsArrayBasePointerConvertible<SourceElement*, TargetElement*> =
IsElementTypeImplicitlyConvertible<SourceElement, TargetElement>;
template <typename SourceElement, typename TargetElement>
constexpr inline bool IsArrayBasePointerConvertible<
ByteStridedPointer<SourceElement>, TargetElement*> =
IsElementTypeImplicitlyConvertible<SourceElement, TargetElement>;
template <typename SourceElement, typename TargetElement>
constexpr inline bool IsArrayBasePointerConvertible<
std::shared_ptr<SourceElement>, TargetElement*> =
IsElementTypeImplicitlyConvertible<SourceElement, TargetElement>;
template <typename SourceElement, typename TargetElement>
constexpr inline bool IsArrayBasePointerConvertible<
std::shared_ptr<SourceElement>, std::shared_ptr<TargetElement>> =
IsElementTypeImplicitlyConvertible<SourceElement, TargetElement>;
template <typename Source, typename Target>
constexpr inline bool IsElementPointerCastConvertible = false;
template <typename ElementTag>
class ElementPointer;
template <typename SourceTag, typename TargetTag>
constexpr inline bool IsElementPointerCastConvertible<
ElementPointer<SourceTag>, ElementPointer<TargetTag>> =
((IsShared<SourceTag> >= IsShared<TargetTag>)&&
IsElementTypeExplicitlyConvertible<
std::remove_const_t<typename ElementTagTraits<SourceTag>::Element>,
std::remove_const_t<typename ElementTagTraits<TargetTag>::Element>>);
template <typename T>
constexpr inline bool IsNonVoidArrayBasePointer = false;
template <typename T>
constexpr inline bool IsNonVoidArrayBasePointer<T*> = !std::is_void_v<T>;
template <typename T>
constexpr inline bool IsNonVoidArrayBasePointer<std::shared_ptr<T>> =
!std::is_void_v<T>;
namespace internal_element_pointer {
template <typename Target, typename Source>
inline std::enable_if_t<(std::is_pointer_v<Target> ==
std::is_pointer_v<internal::remove_cvref_t<Source>>),
Target>
ConvertPointer(Source&& x) {
return internal::StaticConstPointerCast<
typename std::pointer_traits<Target>::element_type>(
static_cast<Source&&>(x));
}
template <typename Target, typename Source>
inline std::enable_if_t<(std::is_pointer_v<Target> >
std::is_pointer_v<internal::remove_cvref_t<Source>>),
Target>
ConvertPointer(Source&& x) {
return internal::StaticConstPointerCast<
typename std::pointer_traits<Target>::element_type>(x.get());
}
std::string DescribeForCast(DataType dtype);
}
template <typename ElementTagType>
class ElementPointer;
template <typename T>
constexpr inline bool IsElementPointer = false;
template <typename PointerType>
constexpr inline bool IsElementPointer<ElementPointer<PointerType>> = true;
template <typename ElementTagType>
class ElementPointer {
using Traits = ElementTagTraits<ElementTagType>;
public:
static_assert(IsElementTag<ElementTagType>,
"ElementTagType must be an ElementTag type.");
using ElementTag = ElementTagType;
using Pointer = typename Traits::Pointer;
using element_type = typename std::pointer_traits<Pointer>::
element_type;
using Element = element_type;
using DataType = dtype_t<Element>;
template <typename OtherElement>
using rebind = ElementPointer<typename Traits::template rebind<OtherElement>>;
ElementPointer() = default;
ElementPointer(std::nullptr_t) {}
template <
typename Source,
std::enable_if_t<(IsElementPointer<internal::remove_cvref_t<Source>> &&
IsArrayBasePointerConvertible<
typename internal::remove_cvref_t<Source>::Pointer,
Pointer>)>* = nullptr>
ElementPointer(Source&& source)
: storage_(source.dtype(),
internal_element_pointer::ConvertPointer<Pointer>(
std::forward<Source>(source).pointer())) {}
template <typename Source,
std::enable_if_t<IsElementPointerCastConvertible<
internal::remove_cvref_t<Source>, ElementPointer>>* = nullptr>
explicit ElementPointer(unchecked_t, Source&& source)
: storage_(DataType(unchecked, source.dtype()),
internal_element_pointer::ConvertPointer<Pointer>(
std::forward<Source>(source).pointer())) {}
template <
typename SourcePointer,
std::enable_if_t<
IsNonVoidArrayBasePointer<internal::remove_cvref_t<SourcePointer>> &&
IsArrayBasePointerConvertible<internal::remove_cvref_t<SourcePointer>,
Pointer>>* = nullptr>
ElementPointer(SourcePointer&& pointer)
: storage_(pointee_dtype_t<SourcePointer>(),
internal::static_pointer_cast<Element>(
internal_element_pointer::ConvertPointer<Pointer>(
std::forward<SourcePointer>(pointer)))) {}
template <typename SourcePointer,
std::enable_if_t<IsArrayBasePointerConvertible<
internal::remove_cvref_t<SourcePointer>, Pointer>>* = nullptr>
ElementPointer(SourcePointer&& pointer, pointee_dtype_t<SourcePointer> dtype)
: storage_(dtype, internal::static_pointer_cast<Element>(
internal_element_pointer::ConvertPointer<Pointer>(
std::forward<SourcePointer>(pointer)))) {}
template <typename Source>
std::enable_if_t<std::is_constructible_v<ElementPointer, Source&&>,
ElementPointer&>
operator=(Source&& source) {
return *this = ElementPointer(static_cast<Source&&>(source));
}
constexpr DataType dtype() const { return storage_.first(); }
Element* data() const { return internal::to_address(pointer()); }
ByteStridedPointer<Element> byte_strided_pointer() const { return data(); }
const Pointer& pointer() const& { return storage_.second(); }
Pointer& pointer() & { return storage_.second(); }
Pointer&& pointer() && { return static_cast<Pointer&&>(storage_.second()); }
explicit operator bool() const { return data() != nullptr; }
template <typename B>
friend bool operator==(const ElementPointer& a, const ElementPointer<B>& b) {
return a.data() == b.data() && a.dtype() == b.dtype();
}
template <typename B>
friend bool operator!=(const ElementPointer& a, const ElementPointer<B>& b) {
return !(a == b);
}
friend bool operator==(const ElementPointer& p, std::nullptr_t) {
return p.data() == nullptr;
}
friend bool operator==(std::nullptr_t, const ElementPointer& p) {
return p.data() == nullptr;
}
friend bool operator!=(const ElementPointer& p, std::nullptr_t) {
return p.data() != nullptr;
}
friend bool operator!=(std::nullptr_t, const ElementPointer& p) {
return p.data() != nullptr;
}
private:
using Storage = internal::CompressedPair<DataType, Pointer>;
Storage storage_{DataType(), nullptr};
};
template <typename Element>
using SharedElementPointer = ElementPointer<Shared<Element>>;
template <typename ElementTag>
struct StaticCastTraits<ElementPointer<ElementTag>>
: public DefaultStaticCastTraits<ElementPointer<ElementTag>> {
using type = ElementPointer<ElementTag>;
template <typename TargetElement>
using RebindDataType =
typename ElementPointer<ElementTag>::template rebind<TargetElement>;
template <typename OtherElementTag>
static bool IsCompatible(const ElementPointer<OtherElementTag>& other) {
return IsPossiblySameDataType(typename type::DataType(), other.dtype());
}
static std::string Describe() {
return internal_element_pointer::DescribeForCast(typename type::DataType());
}
static std::string Describe(const type& x) {
return internal_element_pointer::DescribeForCast(x.dtype());
}
};
template <typename Element>
std::enable_if_t<!IsShared<Element>, ElementPointer<Shared<Element>>>
UnownedToShared(ElementPointer<Element> element_pointer) {
return {internal::UnownedToShared(element_pointer.pointer()),
element_pointer.dtype()};
}
template <typename T, typename Element>
std::enable_if_t<!IsShared<Element>, ElementPointer<Shared<Element>>>
UnownedToShared(const std::shared_ptr<T>& owned,
ElementPointer<Element> element_pointer) {
return {std::shared_ptr<Element>(owned, element_pointer.pointer()),
element_pointer.dtype()};
}
template <typename T>
inline T* AddByteOffset(T* x, Index byte_offset) {
return (ByteStridedPointer<T>(x) + byte_offset).get();
}
template <typename T>
inline std::shared_ptr<T> AddByteOffset(const std::shared_ptr<T>& x,
Index byte_offset) {
return std::shared_ptr<T>(x, AddByteOffset(x.get(), byte_offset));
}
template <typename ElementTag>
inline ElementPointer<ElementTag> AddByteOffset(
const ElementPointer<ElementTag>& x, Index byte_offset) {
return {AddByteOffset(x.pointer(), byte_offset), x.dtype()};
}
template <typename ElementTag>
inline ElementPointer<ElementTag> AddByteOffset(ElementPointer<ElementTag>&& x,
Index byte_offset) {
return {AddByteOffset(std::move(x.pointer()), byte_offset), x.dtype()};
}
namespace internal_element_pointer {
template <typename Pointer>
struct DeducedElementTagHelper {};
template <typename T>
struct DeducedElementTagHelper<ElementPointer<T>> {
using type = T;
};
template <typename T>
struct DeducedElementTagHelper<std::shared_ptr<T>>
: public std::enable_if<!std::is_void_v<T>, Shared<T>> {};
template <typename T>
struct DeducedElementTagHelper<T*>
: public std::enable_if<!std::is_void_v<T>, T> {};
}
template <typename T>
using DeducedElementTag =
typename internal_element_pointer::DeducedElementTagHelper<
internal::remove_cvref_t<T>>::type;
template <typename Pointer>
ElementPointer(Pointer pointer) -> ElementPointer<DeducedElementTag<Pointer>>;
}
#endif
#include "tensorstore/util/element_pointer.h"
#include <string>
#include "tensorstore/data_type.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_element_pointer {
std::string DescribeForCast(DataType dtype) {
return tensorstore::StrCat("pointer with ",
StaticCastTraits<DataType>::Describe(dtype));
}
}
} | #include "tensorstore/util/element_pointer.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/data_type.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::ElementPointer;
using ::tensorstore::ElementTagTraits;
using ::tensorstore::IsElementTag;
using ::tensorstore::MatchesStatus;
using ::tensorstore::PointerElementTag;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::SharedElementPointer;
using ::tensorstore::StaticDataTypeCast;
static_assert(IsElementTag<int>);
static_assert(IsElementTag<void>);
static_assert(IsElementTag<const void>);
static_assert(IsElementTag<int*>);
static_assert(IsElementTag<const int>);
static_assert(!IsElementTag<volatile int>);
static_assert(!IsElementTag<int(int)>);
static_assert(IsElementTag<int (*)(int)>);
static_assert(IsElementTag<Shared<int>>);
static_assert(!IsElementTag<const Shared<int>>);
static_assert(!IsElementTag<Shared<Shared<int>>>);
static_assert(!IsElementTag<Shared<const Shared<int>>>);
static_assert(!IsElementTag<Shared<const Shared<Shared<int>>>>);
static_assert(std::is_same_v<ElementTagTraits<int>::Pointer, int*>);
static_assert(std::is_same_v<ElementTagTraits<int>::rebind<float>, float>);
static_assert(std::is_same_v<ElementTagTraits<Shared<int>>::Pointer,
std::shared_ptr<int>>);
static_assert(std::is_same_v<ElementTagTraits<Shared<int>>::rebind<float>,
Shared<float>>);
static_assert(std::is_same_v<PointerElementTag<int*>, int>);
static_assert(
std::is_same_v<PointerElementTag<std::shared_ptr<int>>, Shared<int>>);
static_assert(
std::is_convertible_v<ElementPointer<int>, ElementPointer<const int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<int>>);
static_assert(std::is_convertible_v<ElementPointer<int>, ElementPointer<void>>);
static_assert(
!std::is_convertible_v<ElementPointer<void>, ElementPointer<int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<void>>);
static_assert(std::is_convertible_v<ElementPointer<const int>,
ElementPointer<const void>>);
static_assert(std::is_convertible_v<int*, ElementPointer<int>>);
static_assert(!std::is_convertible_v<const int*, ElementPointer<int>>);
static_assert(std::is_convertible_v<const int*, ElementPointer<const int>>);
static_assert(std::is_convertible_v<int*, ElementPointer<void>>);
static_assert(!std::is_convertible_v<const int*, ElementPointer<void>>);
static_assert(std::is_convertible_v<int*, ElementPointer<const int>>);
static_assert(std::is_convertible_v<int*, ElementPointer<const void>>);
static_assert(std::is_constructible_v<ElementPointer<void>, void*, DataType>);
static_assert(
std::is_constructible_v<ElementPointer<const void>, void*, DataType>);
static_assert(!std::is_constructible_v<ElementPointer<void>, void*>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
SharedElementPointer<const int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<int>>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
SharedElementPointer<void>>);
static_assert(!std::is_convertible_v<SharedElementPointer<void>,
SharedElementPointer<int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<void>>);
static_assert(std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<const void>>);
static_assert(
std::is_convertible_v<std::shared_ptr<int>, SharedElementPointer<int>>);
static_assert(std::is_convertible_v<std::shared_ptr<const int>,
SharedElementPointer<const int>>);
static_assert(
std::is_convertible_v<std::shared_ptr<int>, SharedElementPointer<void>>);
static_assert(!std::is_convertible_v<std::shared_ptr<const int>,
SharedElementPointer<void>>);
static_assert(std::is_convertible_v<std::shared_ptr<int>,
SharedElementPointer<const int>>);
static_assert(std::is_convertible_v<std::shared_ptr<int>,
SharedElementPointer<const void>>);
static_assert(std::is_constructible_v<SharedElementPointer<void>,
std::shared_ptr<void>, DataType>);
static_assert(std::is_constructible_v<SharedElementPointer<const void>,
std::shared_ptr<void>, DataType>);
static_assert(
std::is_convertible_v<SharedElementPointer<int>, ElementPointer<int>>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
ElementPointer<const int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<void>,
ElementPointer<const int>>);
static_assert(!std::is_constructible_v<ElementPointer<const int>,
SharedElementPointer<void>>);
static_assert(!std::is_constructible_v<ElementPointer<int>,
SharedElementPointer<const void>>);
TEST(ElementPointerTest, StaticType) {
{
ElementPointer<float> p_null;
EXPECT_EQ(nullptr, p_null.data());
}
{
ElementPointer<float> p_null = nullptr;
EXPECT_EQ(nullptr, p_null.data());
}
float value;
ElementPointer<float> p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(&value, p.pointer());
EXPECT_EQ(dtype_v<float>, p.dtype());
{
ElementPointer<const float> p_const = p;
EXPECT_EQ(&value, p_const.data());
EXPECT_EQ(dtype_v<float>, p_const.dtype());
p_const.pointer() = nullptr;
EXPECT_EQ(nullptr, p_const.data());
}
{
ElementPointer<float> p_copy = p;
EXPECT_EQ(&value, p_copy.data());
}
ElementPointer<const void> other = p;
EXPECT_EQ(&value, other.data());
EXPECT_EQ(other.dtype(), p.dtype());
{
auto p2 = tensorstore::StaticDataTypeCast<const float>(other);
static_assert(
std::is_same_v<decltype(p2), Result<ElementPointer<const float>>>);
TENSORSTORE_ASSERT_OK(p2);
EXPECT_EQ(&value, p2->data());
}
{
ElementPointer<const float> p_const;
p_const = p;
EXPECT_EQ(&value, p_const.data());
p_const = nullptr;
EXPECT_EQ(nullptr, p_const.data());
}
static_assert(!std::is_assignable_v<ElementPointer<float>,
ElementPointer<const float>>);
static_assert(
!std::is_assignable_v<ElementPointer<int>, ElementPointer<float>>);
static_assert(!std::is_assignable_v<ElementPointer<int>, float*>);
static_assert(!std::is_assignable_v<ElementPointer<void>, void*>);
static_assert(!std::is_assignable_v<ElementPointer<const float>,
ElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<float>, void*>);
}
TEST(ElementPointerTest, DynamicType) {
{
ElementPointer<void> p_null;
EXPECT_EQ(nullptr, p_null.data());
EXPECT_EQ(DataType(), p_null.dtype());
}
{
ElementPointer<void> p_null = nullptr;
EXPECT_EQ(nullptr, p_null.data());
EXPECT_EQ(DataType(), p_null.dtype());
}
float value;
{
ElementPointer<void> p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p = {static_cast<void*>(&value), dtype_v<float>};
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
static_assert(
!std::is_assignable_v<ElementPointer<void>, ElementPointer<const float>>);
static_assert(!std::is_assignable_v<ElementPointer<void>,
SharedElementPointer<const float>>);
static_assert(
!std::is_assignable_v<ElementPointer<void>, ElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<void>,
SharedElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<void>, void*>);
static_assert(!std::is_assignable_v<ElementPointer<void>, const float*>);
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
ElementPointer<void> p_copy = p;
EXPECT_EQ(&value, p_copy.data());
EXPECT_EQ(dtype_v<float>, p_copy.dtype());
}
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
ElementPointer<void> p_copy;
p_copy = p;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
std::shared_ptr<float> shared_value = std::make_shared<float>();
p = SharedElementPointer<float>(shared_value);
EXPECT_EQ(shared_value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
std::shared_ptr<float> shared_value = std::make_shared<float>();
p = SharedElementPointer<void>(shared_value);
EXPECT_EQ(shared_value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
}
TEST(ElementPointerTest, StaticDataTypeCast) {
float value;
EXPECT_THAT(StaticDataTypeCast<std::int32_t>(ElementPointer<void>(&value)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast pointer with data type of float32 to "
"pointer with data type of int32"));
EXPECT_THAT(StaticDataTypeCast<std::int32_t>(
SharedElementPointer<void>(std::make_shared<float>())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast pointer with data type of float32 to "
"pointer with data type of int32"));
}
TEST(SharedElementPointerTest, StaticType) {
std::shared_ptr<float> value = std::make_shared<float>();
SharedElementPointer<float> p = value;
EXPECT_EQ(value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
{
SharedElementPointer<float> p_copy = p;
EXPECT_EQ(value.get(), p_copy.data());
SharedElementPointer<float> p_move = std::move(p_copy);
EXPECT_EQ(value.get(), p_move.data());
SharedElementPointer<const float> p_const_move = std::move(p_move);
EXPECT_EQ(value.get(), p_const_move.data());
}
{
SharedElementPointer<const void> other = p;
EXPECT_EQ(value.get(), other.data());
EXPECT_EQ(other.dtype(), p.dtype());
EXPECT_EQ(3, value.use_count());
}
EXPECT_EQ(2, value.use_count());
{
ElementPointer<float> x = p;
EXPECT_EQ(value.get(), x.data());
}
{
ElementPointer<const void> x = p;
EXPECT_EQ(value.get(), x.data());
}
{
SharedElementPointer<void> shared_p_void = p;
auto p_float = StaticDataTypeCast<float>(shared_p_void).value();
static_assert(
std::is_same_v<decltype(p_float), SharedElementPointer<float>>);
EXPECT_EQ(value.get(), p_float.data());
}
{
float fvalue;
auto f_pointer = UnownedToShared(ElementPointer<float>(&fvalue));
static_assert(
std::is_same_v<decltype(f_pointer), SharedElementPointer<float>>);
EXPECT_EQ(&fvalue, f_pointer.data());
}
{
SharedElementPointer<float> p2;
EXPECT_TRUE(p2 == nullptr);
EXPECT_TRUE(nullptr == p2);
EXPECT_FALSE(p2 != nullptr);
EXPECT_FALSE(nullptr != p2);
EXPECT_FALSE(p2 == p);
EXPECT_TRUE(p2 != p);
p2 = p;
EXPECT_EQ(value.get(), p2.data());
EXPECT_FALSE(p2 == nullptr);
EXPECT_FALSE(nullptr == p2);
EXPECT_TRUE(p2 != nullptr);
EXPECT_TRUE(nullptr != p2);
EXPECT_TRUE(p2 == p);
EXPECT_FALSE(p2 != p);
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<float> p2_move;
p2_move = std::move(p2);
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<const float> p2_move;
p2_move = std::move(p2);
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<float> p2_move;
p2_move = std::move(p2.pointer());
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
static_assert(!std::is_assignable_v<SharedElementPointer<float>,
SharedElementPointer<void>>);
static_assert(!std::is_assignable_v<SharedElementPointer<float>,
std::shared_ptr<void>>);
}
TEST(SharedElementPointerTest, DynamicType) {
std::shared_ptr<float> value = std::make_shared<float>();
SharedElementPointer<void> p = value;
EXPECT_EQ(value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
TEST(ElementPointerTest, Deduction) {
int* raw_int_ptr;
std::shared_ptr<int> shared_int_ptr;
ElementPointer<int> el_ptr;
ElementPointer<void> el_void_ptr;
SharedElementPointer<int> shared_el_ptr;
SharedElementPointer<void> shared_void_el_ptr;
{
auto x = ElementPointer(raw_int_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<int>>);
}
{
auto x = ElementPointer(shared_int_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<int>>>);
}
{
auto x = ElementPointer(el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<int>>);
}
{
auto x = ElementPointer(el_void_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<void>>);
}
{
auto x = ElementPointer(shared_el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<int>>>);
}
{
auto x = ElementPointer(shared_void_el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<void>>>);
}
}
} |
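// A minimal usage sketch of the ElementPointer API exercised by the tests
// above; the buffer, its element type, and the byte offset are hypothetical
// values chosen for illustration only.
#include "tensorstore/util/element_pointer.h"

void ElementPointerSketch() {
  // Wrap a raw typed pointer; the data type is deduced statically.
  float buffer[4] = {0, 1, 2, 3};
  tensorstore::ElementPointer<float> typed(&buffer[0]);

  // Erase the static type; the DataType is then carried at run time.
  tensorstore::ElementPointer<const void> untyped = typed;

  // Advance by a byte offset (here, one float element).
  auto shifted = tensorstore::AddByteOffset(typed, sizeof(float));

  // Adopt unowned memory into a shared (but non-owning) element pointer.
  tensorstore::SharedElementPointer<float> shared =
      tensorstore::UnownedToShared(typed);
  (void)untyped;
  (void)shifted;
  (void)shared;
}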
708 | cpp | tensorflow/tensorflow | optional_debug_tools | tensorflow/lite/optional_debug_tools.cc | tensorflow/lite/optional_debug_tools_test.cc | #ifndef TENSORFLOW_LITE_OPTIONAL_DEBUG_TOOLS_H_
#define TENSORFLOW_LITE_OPTIONAL_DEBUG_TOOLS_H_
#include "tensorflow/lite/core/interpreter.h"
namespace tflite {
void PrintInterpreterState(const impl::Interpreter* interpreter,
int32_t tensor_name_display_length = 25,
int32_t tensor_type_display_length = 15,
int32_t alloc_type_display_length = 18);
}
#endif
#include "tensorflow/lite/optional_debug_tools.h"
#include <cassert>
#include <cinttypes>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <limits>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
const char* AllocTypeName(TfLiteAllocationType type);
void PrintIntVector(const std::vector<int>& v,
bool collapse_consecutives = true,
bool add_newline = false);
class MemoryArenaInfo {
public:
explicit MemoryArenaInfo(TfLiteAllocationType type)
: allocation_type_(type) {}
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
if (tensor.allocation_type != allocation_type_) return;
if (tensor.data.data == nullptr) return;
if (tensor.bytes > max_tensor_mem_bytes_) {
max_tensor_mem_bytes_ = tensor.bytes;
max_tensor_id_ = tensor_index;
}
size_t current_start_addr = reinterpret_cast<size_t>(tensor.data.data);
size_t current_end_addr = current_start_addr + tensor.bytes;
if (current_start_addr < min_tensor_start_addr_) {
min_tensor_start_addr_ = current_start_addr;
}
if (current_end_addr > max_tensor_end_addr_) {
max_tensor_end_addr_ = current_end_addr;
}
TensorAllocInfo info;
info.tensor_id = tensor_index;
info.start_addr = current_start_addr;
info.bytes = tensor.bytes;
const auto result = alloc_info_.insert(info);
assert(result.second);
(void)result;
}
size_t GetArenaStartingAddress() const { return min_tensor_start_addr_; }
void Print() const {
printf("%s Info: ", AllocTypeName(allocation_type_));
if (max_tensor_end_addr_ == 0) {
printf("not holding any allocation.\n");
return;
}
printf("\nTensor %zu has the max size %zu bytes (%.3f MB).\n",
max_tensor_id_, max_tensor_mem_bytes_,
static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
const size_t arena_size = max_tensor_end_addr_ - min_tensor_start_addr_;
printf(
"This memory arena is estimated as[0x%zx, 0x%zx), taking %zu bytes "
"(%.3f MB).\n",
max_tensor_end_addr_, min_tensor_start_addr_, arena_size,
static_cast<float>(arena_size) / (1 << 20));
std::vector<const TensorAllocInfo*> arena_increase_trace;
size_t last_end_addr = 0;
for (const auto& info : alloc_info_) {
if (info.start_addr >= last_end_addr) {
arena_increase_trace.emplace_back(&info);
last_end_addr = info.start_addr + info.bytes;
}
}
printf(
"One possible set of tensors that have non-overlapping memory spaces "
"with each other, and they take up the whole arena:\n");
printf("Tensor ");
for (int i = 0; i < arena_increase_trace.size() - 1; ++i) {
printf("%zu -> ", arena_increase_trace[i]->tensor_id);
}
printf("%zu.\n", arena_increase_trace.back()->tensor_id);
}
private:
struct TensorAllocInfo {
size_t tensor_id;
size_t start_addr;
size_t bytes;
};
struct TensorAllocInfoCompare {
bool operator()(const TensorAllocInfo& lhs,
const TensorAllocInfo& rhs) const {
if (lhs.start_addr < rhs.start_addr) return true;
if (lhs.start_addr == rhs.start_addr) {
if (lhs.bytes > rhs.bytes) return true;
if (lhs.bytes == rhs.bytes) return lhs.tensor_id < rhs.tensor_id;
return false;
}
return false;
}
};
const TfLiteAllocationType allocation_type_;
size_t max_tensor_mem_bytes_ = 0;
size_t max_tensor_id_ = -1;
size_t min_tensor_start_addr_ = std::numeric_limits<size_t>::max();
size_t max_tensor_end_addr_ = 0;
std::set<TensorAllocInfo, TensorAllocInfoCompare> alloc_info_;
};
class DynamicMemoryInfo {
public:
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
if (tensor.allocation_type != kTfLiteDynamic) return;
if (tensor.data.data == nullptr) return;
if (tensor.bytes > max_tensor_mem_bytes_) {
max_tensor_mem_bytes_ = tensor.bytes;
max_tensor_ids_.clear();
      max_tensor_ids_.push_back(static_cast<int>(tensor_index));
} else if (tensor.bytes == max_tensor_mem_bytes_) {
max_tensor_ids_.push_back(static_cast<int>(tensor_index));
}
total_mem_bytes_ += tensor.bytes;
num_total_tensors_++;
}
void Print() const {
printf("kTfLiteDynamic Info: ");
if (total_mem_bytes_ == 0) {
printf("not holding any allocation.\n");
return;
}
printf("\n%zu Tensors ", max_tensor_ids_.size());
PrintIntVector(max_tensor_ids_, false);
printf(" have the max size %zu bytes (%.3f MB).\n", max_tensor_mem_bytes_,
static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
printf("There are %d dynamic tensors, taking %zu bytes (%.3f MB).\n",
num_total_tensors_, total_mem_bytes_,
static_cast<float>(total_mem_bytes_) / (1 << 20));
}
private:
size_t max_tensor_mem_bytes_ = 0;
std::vector<int> max_tensor_ids_;
size_t total_mem_bytes_ = 0;
int num_total_tensors_ = 0;
};
class ModelTensorMemoryInfo {
public:
ModelTensorMemoryInfo()
: rw_info_(kTfLiteArenaRw),
rw_persistent_info_(kTfLiteArenaRwPersistent),
mmap_info_(kTfLiteMmapRo) {}
void Update(size_t tensor_index, const TfLiteTensor& tensor) {
rw_info_.Update(tensor_index, tensor);
rw_persistent_info_.Update(tensor_index, tensor);
mmap_info_.Update(tensor_index, tensor);
dynamic_info_.Update(tensor_index, tensor);
}
int64_t GetOffsetFromArenaStart(const TfLiteTensor& tensor) const {
if (tensor.data.data == nullptr) return -1;
size_t tensor_address = reinterpret_cast<size_t>(tensor.data.data);
if (tensor.allocation_type == kTfLiteArenaRw) {
return static_cast<int64_t>(tensor_address -
rw_info_.GetArenaStartingAddress());
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
return static_cast<int64_t>(
tensor_address - rw_persistent_info_.GetArenaStartingAddress());
}
if (tensor.allocation_type == kTfLiteMmapRo) {
return static_cast<int64_t>(tensor_address -
mmap_info_.GetArenaStartingAddress());
}
return -1;
}
void Print() const {
printf("\n");
rw_info_.Print();
printf("\n");
rw_persistent_info_.Print();
printf("\n");
mmap_info_.Print();
printf("\n");
dynamic_info_.Print();
printf("\n");
}
private:
MemoryArenaInfo rw_info_;
MemoryArenaInfo rw_persistent_info_;
MemoryArenaInfo mmap_info_;
DynamicMemoryInfo dynamic_info_;
};
template <typename T>
void PrintTotalBytesOfTensors(const Subgraph& subgraph, const T& tensor_ids,
const std::string& prefix = " -> ") {
size_t total = 0;
for (const auto id : tensor_ids) {
const TfLiteTensor* tensor = subgraph.tensor(id);
if (tensor == nullptr) continue;
total += tensor->bytes;
}
printf("%s%zuB (%.2fMB)\n", prefix.c_str(), total,
static_cast<float>(total) / (1 << 20));
}
void PrintIntVector(const std::vector<int>& v, bool collapse_consecutives,
bool add_newline) {
if (v.empty()) {
printf("(null)");
if (add_newline) {
printf("\n");
}
return;
}
int range_start = v[0];
int range_end = range_start;
std::function<void(const char*)> print_range = [&](const char* suffix) {
if (range_end == range_start) {
printf("%d%s", range_start, suffix);
} else if (range_end == range_start + 1) {
printf("%d,%d%s", range_start, range_end, suffix);
} else {
printf("%d-%d%s", range_start, range_end, suffix);
}
};
printf("[");
for (int i = 1; i < v.size(); ++i) {
int current = v[i];
if (collapse_consecutives && (current == range_end + 1)) {
range_end = current;
} else {
print_range(",");
range_start = range_end = current;
}
}
print_range("]");
if (add_newline) {
printf("\n");
}
}
void PrintTfLiteIntVector(const TfLiteIntArray* v,
bool collapse_consecutives = true,
bool add_newline = false) {
std::vector<int> tmp;
if (!v || v->size <= 0) {
PrintIntVector(tmp, collapse_consecutives, add_newline);
return;
}
tmp.insert(tmp.end(), v->data, v->data + v->size);
PrintIntVector(tmp, collapse_consecutives, add_newline);
}
const char* TensorTypeName(TfLiteType type) {
switch (type) {
case kTfLiteNoType:
return "kTfLiteNoType";
case kTfLiteFloat32:
return "kTfLiteFloat32";
case kTfLiteInt32:
return "kTfLiteInt32";
case kTfLiteUInt32:
return "kTfLiteUInt32";
case kTfLiteUInt8:
return "kTfLiteUInt8";
case kTfLiteInt8:
return "kTfLiteInt8";
case kTfLiteInt64:
return "kTfLiteInt64";
case kTfLiteUInt64:
return "kTfLiteUInt64";
case kTfLiteString:
return "kTfLiteString";
case kTfLiteBool:
return "kTfLiteBool";
case kTfLiteUInt16:
return "kTfLiteUInt16";
case kTfLiteInt16:
return "kTfLiteInt16";
case kTfLiteComplex64:
return "kTfLiteComplex64";
case kTfLiteComplex128:
return "kTfLiteComplex128";
case kTfLiteFloat16:
return "kTfLiteFloat16";
case kTfLiteBFloat16:
return "kTfLiteBFloat16";
case kTfLiteFloat64:
return "kTfLiteFloat64";
case kTfLiteResource:
return "kTfLiteResource";
case kTfLiteVariant:
return "kTfLiteVariant";
case kTfLiteInt4:
return "kTfLiteInt4";
}
return "(invalid)";
}
const char* AllocTypeName(TfLiteAllocationType type) {
switch (type) {
case kTfLiteMemNone:
return "kTfLiteMemNone";
case kTfLiteMmapRo:
return "kTfLiteMmapRo";
case kTfLiteDynamic:
return "kTfLiteDynamic";
case kTfLiteArenaRw:
return "kTfLiteArenaRw";
case kTfLiteArenaRwPersistent:
return "kTfLiteArenaRwPersistent";
case kTfLitePersistentRo:
return "kTfLitePersistentRo";
case kTfLiteCustom:
return "kTfLiteCustom";
case kTfLiteVariantObject:
return "kTfLiteVariantObject";
}
return "(invalid)";
}
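// Shortens `str` to at most `size_limit` characters. By default the front of
// the string is dropped and replaced with "..." so the tail is kept; with
// `truncate_at_end` the tail is dropped instead. For example, with
// size_limit = 6, "tensorflow" becomes "...low" by default and "ten..." when
// truncating at the end.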
std::string TruncateString(const char* str, int size_limit,
bool truncate_at_end = false) {
if (str == nullptr) return "(nil)";
std::string truncated(str);
const size_t length = truncated.size();
if (length <= size_limit) return truncated;
if (size_limit <= 3) return std::string(size_limit, '.');
if (truncate_at_end) {
truncated.resize(size_limit);
truncated.replace(size_limit - 3, 3, "...");
} else {
truncated.erase(0, length - size_limit);
truncated.replace(0, 3, "...");
}
return truncated;
}
}
void PrintInterpreterState(const Interpreter* interpreter,
const int32_t tensor_name_display_length,
const int32_t tensor_type_display_length,
const int32_t alloc_type_display_length) {
const size_t num_subgraphs = interpreter->subgraphs_size();
printf("Interpreter has %zu subgraphs.\n\n", num_subgraphs);
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
printf("-----------Subgraph-%d has %zu tensors and %zu nodes------------\n",
i, subgraph.tensors_size(), subgraph.nodes_size());
printf("%zu Inputs: ", subgraph.inputs().size());
PrintIntVector(subgraph.inputs());
PrintTotalBytesOfTensors(subgraph, subgraph.inputs());
printf("%zu Outputs: ", subgraph.outputs().size());
PrintIntVector(subgraph.outputs());
PrintTotalBytesOfTensors(subgraph, subgraph.outputs());
printf("\n");
ModelTensorMemoryInfo tensor_mem_info;
for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
tensor_index++) {
const TfLiteTensor* tensor =
subgraph.tensor(static_cast<int>(tensor_index));
tensor_mem_info.Update(tensor_index, *tensor);
}
std::stringstream var_length_fs;
var_length_fs << "%-" << tensor_name_display_length << "s %-"
<< tensor_type_display_length << "s %-"
<< alloc_type_display_length << "s";
printf(
("Tensor %3s " + var_length_fs.str() + " %-18s %-10s %-16s\n").c_str(),
"ID", "Name", "Type", "AllocType", "Size (Bytes/MB)", "Shape",
"MemAddr-Offset");
for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
tensor_index++) {
const TfLiteTensor* tensor =
subgraph.tensor(static_cast<int>(tensor_index));
printf(("Tensor %3zu " + var_length_fs.str() + " %-8zu / %.2f ").c_str(),
tensor_index,
TruncateString(tensor->name, tensor_name_display_length,
true)
.c_str(),
TruncateString(TensorTypeName(tensor->type),
tensor_type_display_length)
.c_str(),
TruncateString(AllocTypeName(tensor->allocation_type),
alloc_type_display_length)
.c_str(),
tensor->bytes, (static_cast<float>(tensor->bytes) / (1 << 20)));
PrintTfLiteIntVector(tensor->dims, false);
const int64_t start_offset =
tensor_mem_info.GetOffsetFromArenaStart(*tensor);
const int64_t end_offset =
start_offset == -1
? -1
: start_offset + static_cast<int64_t>(tensor->bytes);
printf(" [%" PRId64 ", %" PRId64 ")\n", start_offset, end_offset);
}
tensor_mem_info.Print();
subgraph.DumpMemoryPlannerDebugInfo();
std::vector<bool> replaced_node_bits;
std::vector<size_t> replaced_by_node;
replaced_node_bits.resize(subgraph.nodes_size());
replaced_by_node.resize(subgraph.nodes_size());
bool has_delegate_applied = false;
for (size_t node_index = 0; node_index < subgraph.nodes_size();
node_index++) {
replaced_node_bits[node_index] = false;
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(static_cast<int>(node_index));
const TfLiteNode& node = node_and_reg->first;
auto* const delegate = node.delegate;
if (delegate != nullptr) {
has_delegate_applied = true;
auto* params = static_cast<TfLiteDelegateParams*>(node.builtin_data);
for (int nid : TfLiteIntArrayView(params->nodes_to_replace)) {
replaced_node_bits[nid] = true;
replaced_by_node[nid] = node_index;
}
}
}
for (size_t node_index = 0; node_index < subgraph.nodes_size();
node_index++) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(static_cast<int>(node_index));
const TfLiteNode& node = node_and_reg->first;
const TfLiteRegistration& reg = node_and_reg->second;
std::string delegated_status;
bool is_node_delegated = false;
TfLiteIntArray empty_int_array;
empty_int_array.size = 0;
if (node.delegate == nullptr) {
if (replaced_node_bits[node_index]) {
delegated_status = "(delegated by node ";
delegated_status.append(std::to_string(replaced_by_node[node_index]));
delegated_status.append(")");
is_node_delegated = true;
} else {
delegated_status = "(not delegated)";
}
}
if (reg.custom_name != nullptr) {
printf("Node %3zu Operator Custom Name %s %s\n", node_index,
reg.custom_name, delegated_status.c_str());
} else {
printf("Node %3zu Operator Builtin Code %3d %s %s\n", node_index,
reg.builtin_code, EnumNamesBuiltinOperator()[reg.builtin_code],
delegated_status.c_str());
}
printf(" %d Input Tensors:",
node.inputs != nullptr ? node.inputs->size : 0);
if (node.inputs) {
PrintTfLiteIntVector(
node.inputs,
(node.delegate != nullptr));
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.inputs));
}
printf(" %d Output Tensors:",
node.outputs != nullptr ? node.outputs->size : 0);
if (node.outputs) {
PrintTfLiteIntVector(node.outputs);
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.outputs));
}
if (node.intermediates && node.intermediates->size) {
printf(" %d Intermediate Tensors:", node.intermediates->size);
PrintTfLiteIntVector(node.intermediates);
PrintTotalBytesOfTensors(subgraph,
is_node_delegated
? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.intermediates));
}
if (node.temporaries && node.temporaries->size) {
printf(" %d Temporary Tensors:", node.temporaries->size);
PrintTfLiteIntVector(node.temporaries);
PrintTotalBytesOfTensors(
subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
: TfLiteIntArrayView(node.temporaries));
}
}
printf("\nExecution plan as the list of %zu nodes invoked in-order: ",
subgraph.execution_plan().size());
PrintIntVector(subgraph.execution_plan(), true,
true);
if (has_delegate_applied) {
printf("Among these nodes in the execution plan:\n");
for (int node_id : subgraph.execution_plan()) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
subgraph.node_and_registration(node_id);
const TfLiteNode& node = node_and_reg->first;
auto* const delegate = node.delegate;
if (delegate == nullptr) continue;
const char* delegate_name = node_and_reg->second.custom_name;
auto* delegate_params =
static_cast<TfLiteDelegateParams*>(node.builtin_data);
printf(" Node %d is a %s node (%p), which has delegated %d nodes: ",
node_id, delegate_name == nullptr ? "[n/a]" : delegate_name,
delegate, delegate_params->nodes_to_replace->size);
PrintTfLiteIntVector(delegate_params->nodes_to_replace,
true,
true);
}
}
printf("--------------Subgraph-%d dump has completed--------------\n\n", i);
}
printf("--------------Memory Arena Status Start--------------\n");
size_t total_arena_memory_bytes = 0;
size_t total_dynamic_memory_bytes = 0;
size_t total_resource_bytes = 0;
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
Subgraph::SubgraphAllocInfo alloc_info;
subgraph.GetMemoryAllocInfo(&alloc_info);
total_arena_memory_bytes += alloc_info.arena_size;
total_arena_memory_bytes += alloc_info.arena_persist_size;
total_dynamic_memory_bytes += alloc_info.dynamic_size;
if (i == 0) {
total_resource_bytes = alloc_info.resource_size;
}
}
size_t total_memory_bytes = total_arena_memory_bytes +
total_dynamic_memory_bytes + total_resource_bytes;
printf("Total memory usage: %zu bytes (%.3f MB)\n", total_memory_bytes,
static_cast<float>(total_memory_bytes) / (1 << 20));
printf("- Total arena memory usage: %zu bytes (%.3f MB)\n",
total_arena_memory_bytes,
static_cast<float>(total_arena_memory_bytes) / (1 << 20));
printf("- Total dynamic memory usage: %zu bytes (%.3f MB)\n",
total_dynamic_memory_bytes,
static_cast<float>(total_dynamic_memory_bytes) / (1 << 20));
if (total_resource_bytes) {
printf("- Total resource memory usage: %zu bytes (%.3f MB)\n",
total_resource_bytes,
static_cast<float>(total_resource_bytes) / (1 << 20));
}
putchar('\n');
for (int i = 0; i < num_subgraphs; ++i) {
const Subgraph& subgraph = *(interpreter->subgraph(i));
Subgraph::SubgraphAllocInfo alloc_info;
subgraph.GetMemoryAllocInfo(&alloc_info);
if (alloc_info.arena_size) {
printf(
"Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Normal)",
alloc_info.arena_size,
static_cast<float>(alloc_info.arena_size * 100) / total_memory_bytes);
}
if (alloc_info.arena_persist_size) {
printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Persistent)",
alloc_info.arena_persist_size,
static_cast<float>(alloc_info.arena_persist_size * 100) /
total_memory_bytes);
}
if (alloc_info.dynamic_size) {
printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Dyanmic Tensors",
alloc_info.dynamic_size,
static_cast<float>(alloc_info.dynamic_size * 100) /
total_memory_bytes);
}
}
printf("--------------Memory Arena Status End--------------\n\n");
}
} | #include "tensorflow/lite/optional_debug_tools.h"
#include <algorithm>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace {
void InitInputTensorData(Interpreter* interpreter) {
ASSERT_EQ(interpreter->inputs().size(), 1);
TfLiteTensor* t = interpreter->input_tensor(0);
ASSERT_EQ(t->type, kTfLiteFloat32);
float* data = static_cast<float*>(t->data.data);
int num_elements = t->bytes / sizeof(float);
std::fill(data, data + num_elements, 1.0f);
}
}
TEST(OptionalDebugTools, PrintInterpreterState) {
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(
InterpreterBuilder(
*model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
&interpreter),
kTfLiteOk);
PrintInterpreterState(interpreter.get());
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
InitInputTensorData(interpreter.get());
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
}
TEST(OptionalDebugTools, PrintInterpreterStateWithDelegate) {
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(
InterpreterBuilder(
*model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
&interpreter),
kTfLiteOk);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
ASSERT_EQ(interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()),
kTfLiteOk);
InitInputTensorData(interpreter.get());
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
PrintInterpreterState(interpreter.get());
}
} |
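// A minimal sketch of dumping interpreter state with wider columns than the
// defaults declared in the header (25/15/18). The model path is a
// hypothetical placeholder.
#include <memory>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/optional_debug_tools.h"

void DumpModelState(const char* model_path) {
  auto model = tflite::FlatBufferModel::BuildFromFile(model_path);
  if (!model) return;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::ops::builtin::BuiltinOpResolver resolver;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
    return;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) return;
  // Wider name/type/allocation-type columns for models with long tensor names.
  tflite::PrintInterpreterState(interpreter.get(),
                                /*tensor_name_display_length=*/40,
                                /*tensor_type_display_length=*/20,
                                /*alloc_type_display_length=*/24);
}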
709 | cpp | tensorflow/tensorflow | simple_planner | tensorflow/lite/simple_planner.cc | tensorflow/lite/simple_planner_test.cc | #ifndef TENSORFLOW_LITE_SIMPLE_PLANNER_H_
#define TENSORFLOW_LITE_SIMPLE_PLANNER_H_
#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/util.h"
namespace tflite {
struct SimpleAlloc {
SimpleAlloc() { reset(); }
size_t size;
int32_t node;
char* ptr;
inline void reset() {
size = 0;
node = 0;
ptr = nullptr;
}
inline bool alloc(size_t new_size, int32_t new_first_node) {
if (new_size == 0) {
return false;
}
size = new_size;
node = new_first_node;
assert(ptr == nullptr);
ptr = static_cast<char*>(malloc(new_size));
return true;
}
inline void free() {
if (ptr) {
::free(ptr);
}
reset();
}
};
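// A MemoryPlanner that services each tensor with its own malloc/free pair
// (see SimpleAlloc above) instead of carving buffers out of a shared arena.
// PlanAllocations() records, per tensor, the first node that produces it and
// the node after which it is no longer read; ExecuteAllocations() then
// allocates and resolves buffers for the nodes in the requested range.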
class SimplePlanner : public MemoryPlanner {
public:
SimplePlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info);
~SimplePlanner() override;
SimplePlanner(const SimplePlanner&) = delete;
SimplePlanner& operator=(const SimplePlanner&) = delete;
TfLiteStatus ResetAllocations() override;
TfLiteStatus ResetAllocationsAfter(int node) override;
TfLiteStatus PlanAllocations() override;
TfLiteStatus ExecuteAllocations(int first_node, int last_node) override;
TfLiteStatus ReleaseNonPersistentMemory() override;
TfLiteStatus AcquireNonPersistentMemory() override;
  bool HasNonPersistentMemory() override { return true; }
  void DumpDebugInfo(const std::vector<int>& execution_plan) const override {}
  void GetAllocInfo(size_t* arena_size,
                    size_t* arena_persist_size) const override {}
private:
void FreeAllAllocations();
TfLiteStatus ResolveTensorAllocation(int tensor_index);
TfLiteContext* context_;
std::unique_ptr<GraphInfo> graph_info_;
std::vector<SimpleAlloc> allocs_;
std::vector<int32_t> alloc_node_;
std::vector<int32_t> dealloc_node_;
};
}
#endif
#include "tensorflow/lite/simple_planner.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
}
SimplePlanner::SimplePlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info)
: context_(context), graph_info_(std::move(graph_info)) {}
SimplePlanner::~SimplePlanner() { FreeAllAllocations(); }
void SimplePlanner::FreeAllAllocations() {
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
allocs_[i].free();
}
}
TfLiteStatus SimplePlanner::ResetAllocations() {
FreeAllAllocations();
allocs_.clear();
allocs_.resize(graph_info_->num_tensors());
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::PlanAllocations() {
TF_LITE_ENSURE_STATUS(ResetAllocations());
alloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
std::vector<int> refcounts(graph_info_->num_tensors(), 0);
auto allocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] != kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
alloc_node_[tensor] = node;
return kTfLiteOk;
};
auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] == kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
dealloc_node_[tensor] = node;
return kTfLiteOk;
};
for (int tensor_index : graph_info_->outputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
for (int tensor_index : graph_info_->variables()) {
refcounts[tensor_index]++;
TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
}
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
}
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]--;
if (refcounts[tensor_index] == 0) {
TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ExecuteAllocations(int first_node, int last_node) {
alloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
allocs_.resize(graph_info_->num_tensors());
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = first_node;
i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_temporaries = node.temporaries;
for (int j = 0; j < node_temporaries->size; ++j) {
int tensor_index = node_temporaries->data[j];
alloc_node_[tensor_index] = i;
dealloc_node_[tensor_index] = i;
}
}
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
bool allocated = false;
if (alloc_node_[i] >= first_node && alloc_node_[i] <= last_node) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[i].size != 0) {
allocs_[i].free();
}
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
} else if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
allocs_[i].size == 0) {
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
}
}
if (allocated) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ReleaseNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::AcquireNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ResolveTensorAllocation(int tensor_index) {
TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size != 0) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/simple_planner.h"
#include <algorithm>
#include <cstdarg>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
class TestOp {
public:
TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
std::initializer_list<int> temporaries)
: inputs_(inputs), outputs_(outputs), temporaries_(temporaries) {}
const std::vector<int>& inputs() const { return inputs_; }
const std::vector<int>& outputs() const { return outputs_; }
const std::vector<int>& temporaries() const { return temporaries_; }
const TfLiteRegistration& registration() const { return registration_; }
private:
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> temporaries_;
TfLiteRegistration registration_{};
};
class TestGraph {
public:
TestGraph(std::initializer_list<int> inputs,
std::initializer_list<TestOp> nodes,
std::initializer_list<int> outputs)
: inputs_(inputs), outputs_(outputs) {
int max_tensor_index = 0;
for (int t : inputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (int t : outputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (const auto& node : nodes) {
auto int_array = [](const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
};
registrations_.push_back(node.registration());
nodes_.push_back(TfLiteNode());
nodes_.back().inputs = int_array(node.inputs());
for (int t : node.inputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().outputs = int_array(node.outputs());
for (int t : node.outputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().temporaries = int_array(node.temporaries());
for (int t : node.temporaries()) {
max_tensor_index = std::max(max_tensor_index, t);
}
}
for (int i = 0; i <= max_tensor_index; ++i) {
tensors_.push_back(TfLiteTensor());
tensors_.back().allocation_type = kTfLiteArenaRw;
tensors_.back().bytes = (i + 1) * 3;
}
}
~TestGraph() {
for (auto node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
TfLiteIntArrayFree(node.temporaries);
}
}
const std::vector<TfLiteNode>& nodes() { return nodes_; }
std::vector<TfLiteTensor>* tensors() { return &tensors_; }
const std::vector<int>& inputs() { return inputs_; }
const std::vector<int>& outputs() { return outputs_; }
const std::vector<int>& variables() { return variables_; }
const std::vector<TfLiteRegistration>& registrations() {
return registrations_;
}
void SetVariables(const std::vector<int>& variables) {
variables_ = variables;
}
void Swap(TestGraph* other) {
std::swap(nodes_, other->nodes_);
std::swap(tensors_, other->tensors_);
std::swap(inputs_, other->inputs_);
std::swap(outputs_, other->outputs_);
std::swap(variables_, other->variables_);
}
private:
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<TfLiteRegistration> registrations_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
};
class TestGraphInfo : public GraphInfo {
public:
explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
size_t num_tensors() const override { return graph_->tensors()->size(); }
const TfLiteRegistration& registration(size_t index) const override {
return graph_->registrations()[index];
}
TfLiteTensor* tensor(size_t index) override {
return &graph_->tensors()->at(index);
}
TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
size_t num_execution_nodes() const override { return graph_->nodes().size(); }
size_t num_total_nodes() const override { return graph_->nodes().size(); }
const TfLiteNode& node(size_t index) const override {
return graph_->nodes()[index];
}
size_t node_index(size_t index) const override { return index; }
const std::vector<int>& inputs() const override { return graph_->inputs(); }
const std::vector<int>& outputs() const override { return graph_->outputs(); }
const std::vector<int>& variables() const override {
return graph_->variables();
}
private:
TestGraph* graph_;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
LOG(INFO) << temp_buffer;
}
class SimplePlannerTest : public ::testing::Test {
protected:
void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_ = std::make_unique<SimplePlanner>(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)));
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void SwapGraph(TestGraph* graph) {
graph_->Swap(graph);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void Execute(int start, int end) {
CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
}
void ReleaseNonPersistentMemory() {
CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
}
void AcquireNonPersistentMemory() {
CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
}
void ResetAllocationsAfter(int node) {
CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
}
bool HasNonPersistentMemory() {
return planner_ && planner_->HasNonPersistentMemory();
}
bool IsAllocated(int tensor_index) {
return (*graph_->tensors())[tensor_index].data.raw != nullptr;
}
TfLiteContext context_;
TestGraph* graph_;
std::unique_ptr<SimplePlanner> planner_;
};
TEST_F(SimplePlannerTest, EmptyGraph) {
TestGraph graph({}, {}, {});
SetGraph(&graph);
Execute(0, 10);
}
TEST_F(SimplePlannerTest, GraphWithNoOps) {
TestGraph graph({0, 10}, {}, {5, 11});
SetGraph(&graph);
Execute(0, 10);
EXPECT_FALSE(IsAllocated(5));
EXPECT_FALSE(IsAllocated(11));
}
TEST_F(SimplePlannerTest, ZeroSizedTensors) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
(*graph.tensors())[1].bytes = 0;
SetGraph(&graph);
ASSERT_EQ(planner_->ExecuteAllocations(0, 10), kTfLiteOk);
EXPECT_FALSE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
}
TEST_F(SimplePlannerTest, SimpleGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphInputsPreserved) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_FALSE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
(*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
SetGraph(&graph);
Execute(0, 10);
void* tensor5_ptr = (*graph.tensors())[5].data.raw;
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
Execute(0, 10);
EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
TEST_F(SimplePlannerTest, SimpleGraphOptionalOutput) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{-1, 3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
}
} |
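// A minimal sketch of the planner lifecycle exercised by the tests above. The
// planner is assumed to have already been constructed with a valid
// TfLiteContext and GraphInfo (as SimplePlannerTest::SetGraph does);
// `last_node` is the index of the last node to execute.
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/simple_planner.h"

TfLiteStatus RunPlannerOnce(tflite::SimplePlanner& planner, int last_node) {
  // Decide, per tensor, the first node that needs it and the last node that
  // reads it.
  if (planner.PlanAllocations() != kTfLiteOk) return kTfLiteError;
  // Materialize (malloc) buffers for every tensor needed in [0, last_node].
  if (planner.ExecuteAllocations(0, last_node) != kTfLiteOk) return kTfLiteError;
  // Release kTfLiteArenaRw buffers once execution finishes; persistent
  // buffers are kept.
  return planner.ReleaseNonPersistentMemory();
}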
710 | cpp | tensorflow/tensorflow | mutable_op_resolver_utils | tensorflow/lite/mutable_op_resolver_utils.cc | tensorflow/lite/mutable_op_resolver_utils_test.cc | #ifndef TENSORFLOW_LITE_MUTABLE_OP_RESOLVER_UTILS_H_
#define TENSORFLOW_LITE_MUTABLE_OP_RESOLVER_UTILS_H_
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op);
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op,
int min_version, int max_version);
}
#endif
#include "tensorflow/lite/mutable_op_resolver_utils.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op,
int min_version, int max_version) {
TfLiteRegistration registration{};
registration.builtin_code = TfLiteOperatorGetBuiltInCode(op);
registration.custom_name = TfLiteOperatorGetCustomName(op);
registration.version = TfLiteOperatorGetVersion(op);
registration.registration_external = const_cast<TfLiteOperator*>(op);
if (registration.custom_name != nullptr) {
mutable_op_resolver->AddCustom(registration.custom_name, ®istration,
min_version, max_version);
} else {
mutable_op_resolver->AddBuiltin(BuiltinOperator(registration.builtin_code),
®istration, min_version, max_version);
}
}
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op) {
int version = TfLiteOperatorGetVersion(op);
AddOp(mutable_op_resolver, op, version, version);
}
} | #include "tensorflow/lite/mutable_op_resolver_utils.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/common_internal.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/test_util.h"
namespace tflite {
namespace {
TfLiteStatus DummyInvoke(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) {
return kTfLiteOk;
}
TfLiteStatus DummyPrepare(void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) {
return kTfLiteOk;
}
TfLiteOperator* GetDummyRegistration() {
static TfLiteOperator* registration = []() {
auto* op = TfLiteOperatorCreateWithData(
kTfLiteBuiltinCustom, "dummy", 1, nullptr);
TfLiteOperatorSetPrepareWithData(op, DummyPrepare);
TfLiteOperatorSetInvokeWithData(op, DummyInvoke);
return op;
}();
return registration;
}
TfLiteOperator* GetAdditionOpRegistration() {
static TfLiteOperator* registration = []() {
auto* r =
TfLiteOperatorCreateWithData(kTfLiteBuiltinAdd, nullptr,
1, nullptr);
TfLiteOperatorSetInvokeWithData(r, DummyInvoke);
return r;
}();
return registration;
}
using MutableOpResolverTest = tflite::testing::Test;
TEST_F(MutableOpResolverTest, FindOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(
TfLiteOperatorGetBuiltInCode(found_registration->registration_external),
kTfLiteBuiltinAdd);
EXPECT_EQ(TfLiteOperatorGetVersion(found_registration->registration_external),
1);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
EXPECT_EQ(found_registration->version, 1);
}
TEST_F(MutableOpResolverTest, FindMissingOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, RegisterOpWithSingleVersion) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 2);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 3);
}
TEST_F(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
MutableOpResolver resolver;
AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, FindCustomOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
DummyInvoke);
EXPECT_EQ(found_registration->version, 1);
}
TEST_F(MutableOpResolverTest, FindMissingCustomOp) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("whatever", 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST_F(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
MutableOpResolver resolver;
AddOp(&resolver, GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 2);
EXPECT_EQ(found_registration, nullptr);
}
}
} |
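// A minimal sketch of registering a TfLiteOperator through AddOp, mirroring
// the helpers in the test above. The custom op name "my_custom_op" and the
// no-op kernel are placeholders.
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/mutable_op_resolver_utils.h"

namespace {
TfLiteStatus NoOpInvoke(void* user_data, TfLiteOpaqueContext* context,
                        TfLiteOpaqueNode* node) {
  return kTfLiteOk;
}
}  // namespace

void RegisterMyCustomOp(tflite::MutableOpResolver* resolver) {
  TfLiteOperator* op = TfLiteOperatorCreateWithData(
      kTfLiteBuiltinCustom, "my_custom_op", /*version=*/1,
      /*user_data=*/nullptr);
  TfLiteOperatorSetInvokeWithData(op, NoOpInvoke);
  // Registers the op under its custom name, for version 1 only.
  tflite::AddOp(resolver, op);
}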
711 | cpp | tensorflow/tensorflow | allocation | third_party/xla/xla/service/memory_space_assignment/allocation.cc | tensorflow/lite/allocation_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_ALLOCATION_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_ALLOCATION_H_
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/shape.h"
namespace xla::memory_space_assignment {
enum class MemorySpace : std::uint8_t { kDefault, kAlternate };
class Allocation {
public:
virtual ~Allocation() = default;
virtual HloPosition defining_position() const = 0;
std::optional<int64_t> cross_program_prefetch_index() const;
int64_t start_time() const { return start_time_; }
int64_t end_time() const { return end_time_; }
virtual int64_t earliest_available_time() const = 0;
void set_start_time(int64_t start_time) { start_time_ = start_time; }
void set_end_time(int64_t end_time) { end_time_ = end_time; }
void Extend(int64_t end_time) { end_time_ = std::max(end_time_, end_time); }
MemorySpace memory_space() const { return memory_space_; }
std::optional<HeapSimulator::Chunk> maybe_chunk() const { return chunk_; }
HeapSimulator::Chunk chunk() const;
HeapSimulator::Chunk* mutable_chunk() { return &*chunk_; }
void set_offset(int64_t offset);
bool is_scoped_allocation() const { return is_scoped_allocation_; }
bool is_in_alternate_mem() const;
bool is_in_default_mem() const;
const std::vector<HloUse>& uses() const { return uses_; }
void clear_uses() { uses_.clear(); }
bool has_no_uses() const { return uses_.empty(); }
void AddUse(HloUse use);
absl::Status UpdateUses(HloComputation* computation,
HloInstruction* producing_instruction);
virtual bool is_pinned_allocation() const = 0;
virtual bool is_copy_allocation() const = 0;
virtual bool is_sliced_copy_allocation() const = 0;
bool is_copy_like_allocation() const;
HloInstruction* AddGetTupleElements() const;
virtual absl::Status Process() = 0;
virtual absl::Status PostProcess() = 0;
virtual void MarkIfNeeded(
absl::flat_hash_set<const Allocation*>& needed_allocations) const = 0;
virtual void MarkNeeded(
absl::flat_hash_set<const Allocation*>& needed_allocations) const = 0;
virtual std::string ToString() const = 0;
virtual bool operator==(const Allocation& other) const = 0;
protected:
Allocation(HloPosition defining_position, MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk, int64_t start_time,
int64_t end_time, bool is_scoped_allocation,
std::optional<int64_t> cross_program_prefetch_index);
HloPosition original_defining_position() const;
void set_original_defining_position(HloPosition defining_position);
bool base_is_equal(const Allocation& other) const;
private:
HloPosition original_defining_position_;
MemorySpace memory_space_;
std::optional<HeapSimulator::Chunk> chunk_;
int64_t start_time_;
int64_t end_time_;
const bool is_scoped_allocation_;
std::vector<HloUse> uses_;
std::optional<int64_t> cross_program_prefetch_index_;
};
using AllocationSequence = std::vector<std::unique_ptr<Allocation>>;
std::tuple<int64_t, bool, int64_t> GetAllocationSortTuple(
const std::unique_ptr<Allocation>& allocation);
void SortAllocationSequence(AllocationSequence& allocations);
std::string AllocationSequenceToString(AllocationSequence& allocations,
bool sort_allocations = false);
std::vector<Allocation*> GetAllocationSequenceInRawPointers(
AllocationSequence& allocations);
class PinnedAllocation final : public Allocation {
public:
PinnedAllocation(HloPosition defining_position, MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk,
int64_t start_time, int64_t end_time,
bool is_scoped_allocation);
HloPosition defining_position() const override;
int64_t earliest_available_time() const override { return start_time(); }
bool is_pinned_allocation() const override { return true; }
bool is_copy_allocation() const override { return false; }
bool is_sliced_copy_allocation() const override { return false; }
absl::Status Process() override;
absl::Status PostProcess() override { return absl::OkStatus(); }
void MarkIfNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
void MarkNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
std::string ToString() const override;
bool operator==(const Allocation& other) const override;
bool operator==(const PinnedAllocation& other) const;
};
class CopyAllocation final : public Allocation {
public:
CopyAllocation(
Allocation& prev_allocation, MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk,
int64_t copy_start_schedule_after_time,
int64_t copy_done_schedule_before_time, int64_t end_time,
std::optional<int64_t> cross_program_prefetch_index = std::nullopt);
HloPosition defining_position() const override;
int64_t earliest_available_time() const override;
bool is_pinned_allocation() const override { return false; }
bool is_copy_allocation() const override { return true; }
bool is_sliced_copy_allocation() const override { return false; }
absl::Status Process() override;
absl::Status PostProcess() override { return absl::OkStatus(); }
void MarkIfNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
void MarkNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
std::string ToString() const override;
bool operator==(const Allocation& other) const override;
bool operator==(const CopyAllocation& other) const;
const Allocation& prev_allocation() { return prev_allocation_; }
Allocation& mutable_prev_allocation() { return prev_allocation_; }
HloInstruction* copy_start() const { return copy_start_; }
HloInstruction* copy_done() const { return copy_done_; }
void set_copy_start_schedule_after(int64_t copy_start_schedule_after);
void set_copy_done_schedule_before(int64_t copy_done_schedule_before);
int64_t copy_start_schedule_after() const;
int64_t copy_done_schedule_before() const;
private:
Allocation& prev_allocation_;
int64_t copy_start_schedule_after_;
int64_t copy_done_schedule_before_;
HloInstruction* copy_start_ = nullptr;
HloInstruction* copy_done_ = nullptr;
};
class SlicedCopyAllocation final : public Allocation {
public:
struct SliceDetail {
std::string ToString() const;
std::tuple<const SliceDecision&, int64_t, int64_t, const HloInstruction*,
const HloInstruction*>
ToTuple() const;
bool operator==(const SliceDetail& other) const;
absl::Status CreateAsyncSlice(const Shape& original_shape,
HloInstruction& producer,
HloComputation& parent);
SliceDecision slice_decision;
int64_t copy_start_after_time = -1;
int64_t copy_done_before_time = -1;
HloInstruction* copy_start = nullptr;
HloInstruction* copy_done = nullptr;
};
SlicedCopyAllocation(
const Allocation& prev_allocation, MemorySpace memory_space,
std::vector<SliceDecision> slice_decisions_sorted_by_exclusive_start_time,
int64_t copy_done_schedule_before_time, int64_t end_time,
const SlicedPrefetchOptions& sliced_prefetch_options,
absl::FunctionRef<Shape(const Shape&)> get_equivalent_s8_shape_fn);
HloPosition defining_position() const override;
int64_t earliest_available_time() const override;
bool is_pinned_allocation() const override { return false; }
bool is_copy_allocation() const override { return false; }
bool is_sliced_copy_allocation() const override { return true; }
absl::Status Process() override;
absl::Status PostProcess() override { return absl::OkStatus(); }
void MarkIfNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
void MarkNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
std::string ToString() const override;
bool operator==(const Allocation& other) const override;
bool operator==(const SlicedCopyAllocation& other) const;
std::vector<int64_t> SliceOffsetsSortedByStartTime() const;
void AddDiffToAllSliceOffsets(int64_t diff);
void ImportRepackedSliceData(const SlicedAllocationData& data);
const std::vector<SliceDetail>& slice_details_sorted_by_start_time() const;
std::vector<SliceDetail>& mutable_slice_details_sorted_by_start_time();
HloInstruction* concat() const { return concat_; }
private:
SlicedCopyAllocation() = delete;
absl::Status CreateBitcastConcat(const Shape& shape,
absl::Span<HloInstruction* const> slices);
Shape original_shape_to_slice_;
const Allocation& prev_allocation_;
std::vector<SliceDetail> slice_details_sorted_by_exclusive_start_time_;
HloInstruction* concat_ = nullptr;
const SlicedPrefetchOptions& sliced_prefetch_options_;
absl::FunctionRef<Shape(const Shape&)> get_equivalent_s8_shape_fn_;
};
class MirroredAllocation final : public Allocation {
public:
MirroredAllocation(const Allocation& original_allocation, int64_t time);
HloPosition defining_position() const override;
int64_t earliest_available_time() const override { return start_time(); }
bool is_pinned_allocation() const override { return false; }
bool is_copy_allocation() const override { return false; }
bool is_sliced_copy_allocation() const override { return false; }
absl::Status Process() override;
absl::Status PostProcess() override { return absl::OkStatus(); }
void MarkIfNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
void MarkNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
std::string ToString() const override;
bool operator==(const Allocation& other) const override;
bool operator==(const MirroredAllocation& other) const;
private:
const Allocation& original_allocation_;
};
class ParentAllocation final : public Allocation {
public:
ParentAllocation(const Allocation& original_allocation,
HloInstruction* calling_instruction, HloPosition position,
int64_t time);
HloPosition defining_position() const override;
int64_t earliest_available_time() const override { return start_time(); }
bool is_pinned_allocation() const override { return false; }
bool is_copy_allocation() const override { return false; }
bool is_sliced_copy_allocation() const override { return false; }
absl::Status Process() override;
absl::Status PostProcess() override;
void MarkIfNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
void MarkNeeded(absl::flat_hash_set<const Allocation*>& needed_allocations)
const override;
std::string ToString() const override;
bool operator==(const Allocation& other) const override;
bool operator==(const ParentAllocation& other) const;
private:
const Allocation& original_allocation_;
HloInstruction* calling_instruction_;
};
}
#endif
#include "xla/service/memory_space_assignment/allocation.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/service/time_utils.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
namespace xla::memory_space_assignment {
namespace {
std::string UsesToString(const std::vector<HloUse>& uses) {
if (uses.empty()) {
return "none";
}
std::vector<std::string> uses_str;
uses_str.reserve(uses.size());
for (const auto& use : uses) {
uses_str.push_back(use.ToString());
}
return absl::StrJoin(uses_str, ",");
}
int64_t GetSlicedCopyAllocationExclusiveStartTime(
const std::vector<SliceDecision>&
slice_decisions_sorted_by_exclusive_start_time) {
if (slice_decisions_sorted_by_exclusive_start_time.empty()) {
return -1;
}
return slice_decisions_sorted_by_exclusive_start_time.front()
.exclusive_start_time;
}
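// Returns a single chunk spanning from the smallest slice offset to the
// largest slice end, or nullopt if there are no slice decisions.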
std::optional<HeapSimulator::Chunk> GetSlicedCopyAllocationChunk(
const std::vector<SliceDecision>& slice_decisions_sorted_by_start_time) {
if (slice_decisions_sorted_by_start_time.empty()) {
return std::nullopt;
}
auto offset_cmp = [](const SliceDecision& lhs, const SliceDecision& rhs) {
return lhs.chunk.offset < rhs.chunk.offset;
};
auto end_cmp = [](const SliceDecision& lhs, const SliceDecision& rhs) {
return lhs.chunk.chunk_end() < rhs.chunk.chunk_end();
};
return HeapSimulator::Chunk::FromOffsetEnd(
std::min_element(slice_decisions_sorted_by_start_time.begin(),
slice_decisions_sorted_by_start_time.end(), offset_cmp)
->chunk.offset,
std::max_element(slice_decisions_sorted_by_start_time.begin(),
slice_decisions_sorted_by_start_time.end(), end_cmp)
->chunk.chunk_end());
}
}
std::optional<int64_t> Allocation::cross_program_prefetch_index() const {
return cross_program_prefetch_index_;
}
HeapSimulator::Chunk Allocation::chunk() const {
CHECK(chunk_.has_value());
return *chunk_;
}
void Allocation::set_offset(int64_t offset) {
CHECK(chunk_.has_value());
*chunk_ = HeapSimulator::Chunk::FromOffsetSize(offset, chunk_->size);
}
bool Allocation::is_in_alternate_mem() const {
return memory_space_ == MemorySpace::kAlternate;
}
bool Allocation::is_in_default_mem() const {
return memory_space_ == MemorySpace::kDefault;
}
void Allocation::AddUse(HloUse use) {
HloInstruction* operand =
use.instruction->mutable_operand(use.operand_number);
for (int64_t index : use.operand_index) {
if (operand->opcode() != HloOpcode::kTuple) {
break;
}
operand = operand->mutable_operand(index);
}
std::function<HloInstruction*(HloInstruction*)> get_simplified_operand;
get_simplified_operand = [&](HloInstruction* instruction) {
while (instruction->opcode() == HloOpcode::kGetTupleElement) {
HloInstruction* operand =
get_simplified_operand(instruction->mutable_operand(0));
if (operand->opcode() == HloOpcode::kTuple) {
instruction = operand->mutable_operand(instruction->tuple_index());
} else {
return instruction;
}
}
return instruction;
};
operand = get_simplified_operand(operand);
uses_.push_back(use);
}
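// Points every recorded use at `producing_instruction`, inserting a tuple
// rewrite for tuple-shaped operands or a bitcast when the array shapes differ.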
absl::Status Allocation::UpdateUses(HloComputation* computation,
HloInstruction* producing_instruction) {
for (const HloUse& use : uses()) {
HloInstruction* replacement_instruction = producing_instruction;
Shape operand_shape = use.instruction->operand(use.operand_number)->shape();
if (operand_shape.IsTuple()) {
TF_ASSIGN_OR_RETURN(
replacement_instruction,
TupleUtil::ReplaceTupleWith(
producing_instruction,
use.instruction->mutable_operand(use.operand_number),
use.operand_index));
} else if (operand_shape != producing_instruction->shape()) {
VLOG(4) << "Old shape = " << operand_shape.ToString()
<< ", new shape = " << producing_instruction->shape().ToString()
<< "; inserting a bitcast.";
replacement_instruction = computation->AddInstruction(
HloInstruction::CreateBitcast(operand_shape, producing_instruction));
}
TF_RETURN_IF_ERROR(use.instruction->ReplaceOperandWith(
use.operand_number, replacement_instruction));
}
return absl::OkStatus();
}
bool Allocation::is_copy_like_allocation() const {
return is_copy_allocation() || is_sliced_copy_allocation();
}
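// Emits get-tuple-element instructions that extract the array value at the
// defining position, which must have an array shape.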
HloInstruction* Allocation::AddGetTupleElements() const {
CHECK_NE(defining_position().instruction, nullptr);
Shape shape = defining_position().shape();
CHECK(shape.IsArray()) << "Allocation shape is not an array. Shape = "
<< shape.ToString()
<< " position = " << defining_position().shape();
return TupleUtil::AddGetTupleElements(defining_position());
}
Allocation::Allocation(HloPosition defining_position, MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk,
int64_t start_time, int64_t end_time,
bool is_scoped_allocation,
std::optional<int64_t> cross_program_prefetch_index)
: original_defining_position_(std::move(defining_position)),
memory_space_(memory_space),
chunk_(chunk),
start_time_(start_time),
end_time_(end_time),
is_scoped_allocation_(is_scoped_allocation),
cross_program_prefetch_index_(cross_program_prefetch_index) {
CHECK(!is_scoped_allocation ||
original_defining_position_.index == ShapeIndex({}));
}
HloPosition Allocation::original_defining_position() const {
return original_defining_position_;
}
void Allocation::set_original_defining_position(HloPosition defining_position) {
original_defining_position_ = std::move(defining_position);
}
bool Allocation::base_is_equal(const Allocation& other) const {
return defining_position() == other.defining_position() &&
uses() == other.uses() && memory_space() == other.memory_space() &&
chunk() == other.chunk() && start_time() == other.start_time() &&
end_time() == other.end_time() &&
earliest_available_time() == other.earliest_available_time() &&
is_copy_allocation() == other.is_copy_allocation() &&
is_scoped_allocation() == other.is_scoped_allocation();
}
PinnedAllocation::PinnedAllocation(HloPosition defining_position,
MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk,
int64_t start_time, int64_t end_time,
bool is_scoped_allocation)
: Allocation(std::move(defining_position), memory_space, chunk, start_time,
end_time, is_scoped_allocation,
std::nullopt) {}
HloPosition PinnedAllocation::defining_position() const {
return original_defining_position();
}
bool PinnedAllocation::operator==(const PinnedAllocation& other) const {
return this->base_is_equal(static_cast<const Allocation&>(other));
}
bool MirroredAllocation::operator==(const MirroredAllocation& other) const {
return this->base_is_equal(static_cast<const Allocation&>(other));
}
bool ParentAllocation::operator==(const ParentAllocation& other) const {
return this->base_is_equal(static_cast<const Allocation&>(other));
}
bool PinnedAllocation::operator==(const Allocation& other) const {
const PinnedAllocation* casted_other =
dynamic_cast<const PinnedAllocation*>(&other);
return casted_other != nullptr && (*this) == (*casted_other);
}
absl::Status PinnedAllocation::Process() {
if (is_scoped_allocation()) {
return absl::OkStatus();
}
HloInstruction* producing_instruction = AddGetTupleElements();
HloComputation* computation = producing_instruction->parent();
return UpdateUses(computation, producing_instruction);
}
std::string PinnedAllocation::ToString() const {
std::string memory_space_str =
memory_space() == MemorySpace::kDefault ? "def" : "alt";
std::optional<HeapSimulator::Chunk> chunk = maybe_chunk();
if (chunk) {
absl::StrAppend(&memory_space_str, " (off: ", chunk->offset, ")");
}
return absl::StrCat((is_scoped_allocation() ? "Scoped " : ""),
"PinnedAllocation in ", memory_space_str, " defined at ",
original_defining_position().ToString(),
", start_time:", start_time(), ", end_time:", end_time(),
", uses: ", UsesToString(uses()));
}
void PinnedAllocation::MarkIfNeeded(
absl::flat_hash_set<const Allocation*>& needed_allocations) const {
MarkNeeded(needed_allocations);
}
void PinnedAllocation::MarkNeeded(
absl::flat_hash_set<const Allocation*>& needed_allocations) const {
needed_allocations.insert(this);
}
CopyAllocation::CopyAllocation(
Allocation& prev_allocation, MemorySpace memory_space,
std::optional<HeapSimulator::Chunk> chunk,
int64_t copy_start_schedule_after_time,
int64_t copy_done_schedule_before_time, int64_t end_time,
std::optional<int64_t> cross_program_prefetch_index)
: Allocation(
{nullptr, {}}, memory_space, chunk,
ExclusiveToInclusiveStartTime(copy_start_schedule_after_time),
end_time,
false, cross_program_prefetch_index),
prev_allocation_(prev_allocation),
copy_start_schedule_after_(copy_start_schedule_after_time),
copy_done_schedule_before_(copy_done_schedule_before_time) {}
int64_t CopyAllocation::earliest_available_time() const {
return copy_done_schedule_before_;
}
absl::Status CopyAllocation::Process() {
Shape shape = defining_position().shape();
HloInstruction* producing_instruction = AddGetTupleElements();
HloComputation* computation = producing_instruction->parent();
copy_start_ = computation->AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({shape, shape, ShapeUtil::MakeShape(U32, {})}),
producing_instruction, cross_program_prefetch_index()));
copy_done_ = computation->AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopyDone, copy_start_));
VLOG(4) << "Created " << copy_start_->name()
<< " for copy allocation: | #include "tensorflow/lite/allocation.h"
#if defined(__linux__)
#include <fcntl.h>
#endif
#include <sys/stat.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace tflite {
TEST(MMAPAllocation, TestInvalidFile) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation("/tmp/tflite_model_1234", &error_reporter);
EXPECT_FALSE(allocation.valid());
}
TEST(MMAPAllocation, TestValidFile) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation(
"tensorflow/lite/testdata/empty_model.bin", &error_reporter);
ASSERT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
}
#if defined(__linux__)
TEST(MMAPAllocation, TestInvalidFileDescriptor) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation(-1, &error_reporter);
EXPECT_FALSE(allocation.valid());
}
TEST(MMAPAllocation, TestInvalidSizeAndOffset) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
struct stat fd_stat;
ASSERT_EQ(fstat(fd, &fd_stat), 0);
size_t file_size = fd_stat.st_size;
TestErrorReporter error_reporter;
MMAPAllocation allocation_invalid_offset(fd, file_size + 100,
1, &error_reporter);
EXPECT_FALSE(allocation_invalid_offset.valid());
MMAPAllocation allocation_invalid_length(fd, 0, 0,
&error_reporter);
EXPECT_FALSE(allocation_invalid_length.valid());
MMAPAllocation allocation_excessive_length(fd, 0,
file_size + 1,
&error_reporter);
EXPECT_FALSE(allocation_excessive_length.valid());
MMAPAllocation allocation_excessive_length_with_offset(
fd, 10, file_size, &error_reporter);
EXPECT_FALSE(allocation_excessive_length_with_offset.valid());
close(fd);
}
TEST(MMAPAllocation, TestValidFileDescriptor) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
TestErrorReporter error_reporter;
MMAPAllocation allocation(fd, &error_reporter);
EXPECT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
close(fd);
}
TEST(MMAPAllocation, TestValidFileDescriptorWithOffset) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
struct stat fd_stat;
ASSERT_EQ(fstat(fd, &fd_stat), 0);
size_t file_size = fd_stat.st_size;
TestErrorReporter error_reporter;
MMAPAllocation allocation(fd, 10, file_size - 10,
&error_reporter);
EXPECT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
close(fd);
}
#endif
} |
712 | cpp | tensorflow/tensorflow | mutable_op_resolver | tensorflow/lite/mutable_op_resolver.cc | tensorflow/lite/mutable_op_resolver_test.cc | #ifndef TENSORFLOW_LITE_MUTABLE_OP_RESOLVER_H_
#define TENSORFLOW_LITE_MUTABLE_OP_RESOLVER_H_
#include <stddef.h>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace op_resolver_hasher {
template <typename V>
struct ValueHasher {
size_t operator()(const V& v) const { return std::hash<V>()(v); }
};
template <>
struct ValueHasher<tflite::BuiltinOperator> {
size_t operator()(const tflite::BuiltinOperator& v) const {
return std::hash<int>()(static_cast<int>(v));
}
};
template <typename T>
struct OperatorKeyHasher {
size_t operator()(const T& x) const {
size_t a = ValueHasher<typename T::first_type>()(x.first);
size_t b = ValueHasher<typename T::second_type>()(x.second);
return CombineHashes({a, b});
}
};
}
class MutableOpResolver : public OpResolver {
public:
const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
int version) const override;
const TfLiteRegistration* FindOp(const char* op, int version) const override;
void AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration, int version = 1);
void AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration, int min_version,
int max_version);
void AddCustom(const char* name, const TfLiteRegistration* registration,
int version = 1);
void AddCustom(const char* name, const TfLiteRegistration* registration,
int min_version, int max_version);
void AddAll(const MutableOpResolver& other);
OpResolver::TfLiteDelegateCreators GetDelegateCreators() const final {
return delegate_creators_;
}
OpResolver::TfLiteOpaqueDelegateCreators GetOpaqueDelegateCreators()
const final {
return opaque_delegate_creators_;
}
protected:
void ChainOpResolver(const OpResolver* other);
bool may_directly_contain_user_defined_ops_ = false;
TfLiteDelegateCreators delegate_creators_;
TfLiteOpaqueDelegateCreators opaque_delegate_creators_;
private:
bool MayContainUserDefinedOps() const override;
typedef std::pair<tflite::BuiltinOperator, int> BuiltinOperatorKey;
typedef std::pair<std::string, int> CustomOperatorKey;
std::unordered_map<BuiltinOperatorKey, TfLiteRegistration,
op_resolver_hasher::OperatorKeyHasher<BuiltinOperatorKey> >
builtins_;
std::unordered_map<CustomOperatorKey, TfLiteRegistration,
op_resolver_hasher::OperatorKeyHasher<CustomOperatorKey> >
custom_ops_;
std::vector<const OpResolver*> other_op_resolvers_;
};
}
#endif
#include "tensorflow/lite/mutable_op_resolver.h"
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/api/op_resolver_internal.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
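// Looks up a builtin registration by (op, version), falling back to any
// chained op resolvers in the order they were added.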
const TfLiteRegistration* MutableOpResolver::FindOp(tflite::BuiltinOperator op,
int version) const {
auto it = builtins_.find(std::make_pair(op, version));
if (it != builtins_.end()) {
return &it->second;
}
for (const OpResolver* other : other_op_resolvers_) {
const TfLiteRegistration* result = other->FindOp(op, version);
if (result != nullptr) {
return result;
}
}
return nullptr;
}
const TfLiteRegistration* MutableOpResolver::FindOp(const char* op,
int version) const {
auto it = custom_ops_.find(std::make_pair(op, version));
if (it != custom_ops_.end()) {
return &it->second;
}
for (const OpResolver* other : other_op_resolvers_) {
const TfLiteRegistration* result = other->FindOp(op, version);
if (result != nullptr) {
return result;
}
}
return nullptr;
}
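// Registers a builtin op under the given version; the registration is copied
// with its builtin_code and version overwritten. A null registration is a
// no-op.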
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration,
int version) {
if (registration == nullptr) {
return;
}
TfLiteRegistration new_registration = *registration;
new_registration.custom_name = nullptr;
new_registration.builtin_code = op;
new_registration.version = version;
auto op_key = std::make_pair(op, version);
builtins_[op_key] = new_registration;
may_directly_contain_user_defined_ops_ = true;
}
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
const TfLiteRegistration* registration,
int min_version, int max_version) {
for (int version = min_version; version <= max_version; ++version) {
AddBuiltin(op, registration, version);
}
}
void MutableOpResolver::AddCustom(const char* name,
const TfLiteRegistration* registration,
int version) {
TfLiteRegistration new_registration = *registration;
new_registration.builtin_code = BuiltinOperator_CUSTOM;
new_registration.custom_name = name;
new_registration.version = version;
auto op_key = std::make_pair(name, version);
custom_ops_[op_key] = new_registration;
may_directly_contain_user_defined_ops_ = true;
}
void MutableOpResolver::AddCustom(const char* name,
const TfLiteRegistration* registration,
int min_version, int max_version) {
for (int version = min_version; version <= max_version; ++version) {
AddCustom(name, registration, version);
}
}
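// Copies the other resolver's builtin and custom registrations, overwriting
// any existing entries with the same key, and prepends its chained resolvers.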
void MutableOpResolver::AddAll(const MutableOpResolver& other) {
for (const auto& other_builtin : other.builtins_) {
builtins_[other_builtin.first] = other_builtin.second;
}
for (const auto& other_custom_op : other.custom_ops_) {
custom_ops_[other_custom_op.first] = other_custom_op.second;
}
other_op_resolvers_.insert(other_op_resolvers_.begin(),
other.other_op_resolvers_.begin(),
other.other_op_resolvers_.end());
}
void MutableOpResolver::ChainOpResolver(const OpResolver* other) {
other_op_resolvers_.push_back(other);
}
bool MutableOpResolver::MayContainUserDefinedOps() const {
if (may_directly_contain_user_defined_ops_) {
return true;
}
for (const OpResolver* other : other_op_resolvers_) {
if (OpResolverInternal::MayContainUserDefinedOps(*other)) {
return true;
}
}
return false;
}
} | #include "tensorflow/lite/mutable_op_resolver.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
TfLiteStatus DummyInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteRegistration* GetDummyRegistration() {
static TfLiteRegistration registration = {
.init = nullptr,
.free = nullptr,
.prepare = nullptr,
.invoke = DummyInvoke,
};
return &registration;
}
TfLiteStatus Dummy2Invoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus Dummy2Prepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
void* Dummy2Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Dummy2free(TfLiteContext* context, void* buffer) {}
TfLiteRegistration* GetDummy2Registration() {
static TfLiteRegistration registration = {
.init = Dummy2Init,
.free = Dummy2free,
.prepare = Dummy2Prepare,
.invoke = Dummy2Invoke,
};
return &registration;
}
TEST(MutableOpResolverTest, FindOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindMissingOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithSingleVersion) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 3);
}
TEST(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindCustomName) {
MutableOpResolver resolver;
TfLiteRegistration* reg = GetDummyRegistration();
reg->custom_name = "UPDATED";
resolver.AddCustom(reg->custom_name, reg);
const TfLiteRegistration* found_registration =
resolver.FindOp(reg->custom_name, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_EQ(found_registration->invoke, GetDummyRegistration()->invoke);
EXPECT_EQ(found_registration->version, 1);
EXPECT_EQ(found_registration->custom_name, "UPDATED");
}
TEST(MutableOpResolverTest, FindBuiltinName) {
MutableOpResolver resolver1;
TfLiteRegistration* reg = GetDummy2Registration();
reg->custom_name = "UPDATED";
resolver1.AddBuiltin(BuiltinOperator_ADD, reg);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->prepare,
GetDummy2Registration()->prepare);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->init,
GetDummy2Registration()->init);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->free,
GetDummy2Registration()->free);
EXPECT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->custom_name, nullptr);
}
TEST(MutableOpResolverTest, FindMissingCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp("EXCELLENT", 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 2);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, AddAll) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.AddAll(resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
class ChainingMutableOpResolver : public MutableOpResolver {
public:
using MutableOpResolver::ChainOpResolver;
};
TEST(MutableOpResolverTest, ChainOpResolver) {
ChainingMutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, CopyConstructChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AssignChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3 = resolver2;
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AddAllChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3.AddAll(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, ChainOpResolverCustomOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverBuiltinOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverAddVersusChainPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver4;
resolver4.ChainOpResolver(&resolver2);
resolver4.ChainOpResolver(&resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, AddAllAddVersusChainPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
MutableOpResolver resolver4;
resolver4.AddAll(resolver2);
resolver4.AddAll(resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
}
}
} |
713 | cpp | tensorflow/tensorflow | arena_planner | tensorflow/lite/arena_planner.cc | tensorflow/lite/arena_planner_test.cc | #ifndef TENSORFLOW_LITE_ARENA_PLANNER_H_
#define TENSORFLOW_LITE_ARENA_PLANNER_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/simple_memory_arena.h"
#include "tensorflow/lite/util.h"
namespace tflite {
constexpr const int kDefaultArenaAlignment = 64;
class ArenaPlanner : public MemoryPlanner {
public:
ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info,
bool preserve_all_tensors, int tensor_alignment,
int subgraph_index = 0);
~ArenaPlanner() override;
ArenaPlanner(const ArenaPlanner&) = delete;
ArenaPlanner& operator=(const ArenaPlanner&) = delete;
TfLiteStatus ResetAllocations() override;
TfLiteStatus ResetAllocationsAfter(int node) override;
TfLiteStatus PlanAllocations() override;
TfLiteStatus ExecuteAllocations(int first_node, int last_node) override;
TfLiteStatus ReleaseNonPersistentMemory() override;
TfLiteStatus AcquireNonPersistentMemory() override;
bool HasNonPersistentMemory() override;
void DumpDebugInfo(const std::vector<int>& execution_plan) const override;
void GetAllocInfo(size_t* arena_size,
size_t* arena_persist_size) const override;
std::intptr_t BasePointer(TfLiteAllocationType type);
private:
bool InputTensorCanBeShared(const TfLiteTensor& input,
const TfLiteTensor& output, int input_id,
int output_id, bool tensor_changed);
void IdentifyInPlaceTensors();
TfLiteStatus Commit(bool* arena_reallocated);
void CreateTensorAllocationVector(std::vector<int32_t>* tensors_to_allocate);
std::vector<int32_t> GetTensorsToAllocate(int first_node, int last_node);
TfLiteStatus CalculateAllocations(int first_node, int last_node,
std::vector<int32_t>* tensors_allocated);
TfLiteStatus ResolveTensorAllocation(int32_t tensor_index,
TfLiteTensor* tensors);
TfLiteStatus CalculateAllocationOfInternalTensors(int node_index);
TfLiteStatus CalculateDeallocationOfInternalTensors(int node_index);
int FindSharedTensor(int tensor_index);
TfLiteContext* context_;
std::unique_ptr<GraphInfo> graph_info_;
std::vector<ArenaAllocWithUsageInterval> allocs_;
std::vector<std::unordered_set<int32_t>> nodes_to_tensors_;
std::vector<int32_t> alloc_node_;
std::vector<int32_t> dealloc_node_;
SimpleMemoryArena arena_;
bool has_nonpersistent_memory_;
SimpleMemoryArena persistent_arena_;
bool preserve_all_tensors_;
int tensor_alignment_;
int last_active_node_;
std::unordered_map<int32_t, int32_t> actual_tensor_id_;
std::vector<int> refcounts_;
};
}
#endif
#include "tensorflow/lite/arena_planner.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/simple_memory_arena.h"
namespace tflite {
constexpr int32_t kLastActiveNodeUndefined =
std::numeric_limits<int32_t>::max();
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
constexpr int32_t kScalarTensorBytes = 4;
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info,
bool preserve_all_tensors, int tensor_alignment,
int subgraph_index)
: context_(context),
graph_info_(std::move(graph_info)),
arena_(kDefaultArenaAlignment, subgraph_index),
has_nonpersistent_memory_(false),
persistent_arena_(kDefaultArenaAlignment, subgraph_index),
preserve_all_tensors_(preserve_all_tensors),
tensor_alignment_(tensor_alignment),
last_active_node_(kLastActiveNodeUndefined) {}
ArenaPlanner::~ArenaPlanner() {
arena_.ReleaseBuffer();
persistent_arena_.ReleaseBuffer();
}
std::intptr_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
if (type == kTfLiteArenaRwPersistent) {
return persistent_arena_.BasePointer();
}
if (type == kTfLiteArenaRw) {
return arena_.BasePointer();
}
return 0;
}
TfLiteStatus ArenaPlanner::ResetAllocations() {
TF_LITE_ENSURE_STATUS(arena_.ClearPlan());
TF_LITE_ENSURE_STATUS(persistent_arena_.ClearPlan());
allocs_.clear();
allocs_.resize(graph_info_->num_tensors());
last_active_node_ = kLastActiveNodeUndefined;
return kTfLiteOk;
}
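// Releases kTfLiteArenaRw allocations for tensors first allocated after
// `node` and trims the arena's set of active allocations back to that node.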
TfLiteStatus ArenaPlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].first_node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].reset();
tensor.data.raw = nullptr;
}
}
}
if (last_active_node_ > node) {
arena_.CalculateActiveAllocs(allocs_, node);
} else {
arena_.PurgeAfter(node);
}
last_active_node_ = node;
return kTfLiteOk;
}
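// Returns the id of the root tensor whose buffer `tensor_index` shares, or
// `tensor_index` itself if it does not share a buffer.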
int ArenaPlanner::FindSharedTensor(int tensor_index) {
auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
if (actual_tensor_it != actual_tensor_id_.end()) {
tensor_index = actual_tensor_it->second;
}
return tensor_index;
}
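// Whether an op may write its output into the buffer of the given input: if
// the op modifies the data, the sizes must match (and exceed scalar size) and
// the input may have no other consumers; neither tensor may be a graph input
// or output; allocation types must be compatible; and tensor preservation
// must be disabled.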
bool ArenaPlanner::InputTensorCanBeShared(const TfLiteTensor& input_tensor,
const TfLiteTensor& output_tensor,
int input_id, int output_id,
bool tensor_changed) {
if (tensor_changed) {
if (input_tensor.bytes != output_tensor.bytes ||
input_tensor.bytes <= kScalarTensorBytes) {
return false;
}
if (refcounts_[input_id] > 1) {
return false;
}
}
for (int input : graph_info_->inputs()) {
if (input == input_id) {
return false;
}
}
for (int output : graph_info_->outputs()) {
if (output == output_id) {
return false;
}
}
TfLiteAllocationType input_allocation_type = input_tensor.allocation_type;
TfLiteAllocationType output_allocation_type = output_tensor.allocation_type;
if (input_allocation_type != output_allocation_type &&
input_allocation_type != kTfLiteArenaRw) {
return false;
}
if (preserve_all_tensors_) {
return false;
}
return true;
}
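// Builds the output-to-root-input sharing map (actual_tensor_id_) for ops
// declared in-place, so their outputs reuse the buffer of a shareable input.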
void ArenaPlanner::IdentifyInPlaceTensors() {
actual_tensor_id_.clear();
const int num_execution_nodes = graph_info_->num_execution_nodes();
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_execution_nodes; ++i) {
const TfLiteRegistration& registration = graph_info_->registration(i);
const TfLiteNode& node = graph_info_->node(i);
if (node.outputs->size < 1) continue;
bool tensor_changed =
!(registration.inplace_operator & kTfLiteInplaceOpDataUnmodified);
if (registration.inplace_operator == kTfLiteInplaceOpNone) {
continue;
}
int32_t input_id = -1;
int32_t output_id = node.outputs->data[0];
const TfLiteTensor& output_tensor = tensors[output_id];
const int loop_end =
std::min(kTfLiteMaxSharableOpInputs, node.inputs->size);
for (int i = 0; i < loop_end; ++i) {
if (node.inputs->data[i] == kTfLiteOptionalTensor) {
continue;
}
const bool input_shareable =
registration.inplace_operator & (kTfLiteInplaceOpInput0Shared << i);
if (input_shareable) {
const TfLiteTensor& input_tensor = tensors[node.inputs->data[i]];
if (InputTensorCanBeShared(input_tensor, output_tensor,
node.inputs->data[i], output_id,
tensor_changed)) {
input_id = node.inputs->data[i];
break;
}
}
}
if (input_id == -1) {
continue;
}
int32_t actual_output_tensor_id = FindSharedTensor(input_id);
if (tensor_changed) {
if (refcounts_[actual_output_tensor_id] > 1) {
continue;
}
}
actual_tensor_id_[output_id] = actual_output_tensor_id;
}
}
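// Assigns, via reference counting over the execution plan, the node at which
// each tensor is first needed (allocation) and the node after whose execution
// it is no longer used (deallocation). Graph inputs, outputs and variables
// are never deallocated.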
TfLiteStatus ArenaPlanner::PlanAllocations() {
const size_t num_tensors = graph_info_->num_tensors();
TF_LITE_ENSURE_STATUS(ResetAllocations());
alloc_node_.assign(num_tensors, kNodeNotAssigned);
dealloc_node_.assign(num_tensors, kNodeNotAssigned);
nodes_to_tensors_.clear();
nodes_to_tensors_.resize(
std::max(graph_info_->num_execution_nodes(), (size_t)1), {});
refcounts_.assign(num_tensors, 0);
auto allocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] != kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
alloc_node_[tensor] = node;
return kTfLiteOk;
};
auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] == kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
dealloc_node_[tensor] = node;
return kTfLiteOk;
};
for (int tensor_index : graph_info_->outputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
}
}
for (int tensor_index : graph_info_->variables()) {
++refcounts_[tensor_index];
TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
nodes_to_tensors_[0].insert(tensor_index);
}
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
nodes_to_tensors_[0].insert(tensor_index);
}
}
std::vector<int> refcounts = refcounts_;
const int num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
++refcounts_[tensor_index];
}
}
}
IdentifyInPlaceTensors();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
tensor_index = FindSharedTensor(tensor_index);
++refcounts[tensor_index];
}
}
}
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
if (tensor_index == kTfLiteOptionalTensor) continue;
nodes_to_tensors_[i].insert(tensor_index);
TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
if (!preserve_all_tensors_) {
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
tensor_index = FindSharedTensor(tensor_index);
--refcounts[tensor_index];
if (refcounts[tensor_index] == 0) {
TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}
}
}
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
const size_t num_tensors = graph_info_->num_tensors();
TF_LITE_ENSURE(context_, num_tensors >= allocs_.size());
alloc_node_.resize(num_tensors, kNodeNotAssigned);
dealloc_node_.resize(num_tensors, kNodeNotAssigned);
allocs_.resize(num_tensors);
const int num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = first_node;
i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_temporaries = node.temporaries;
for (int j = 0; j < node_temporaries->size; ++j) {
int tensor_index = node_temporaries->data[j];
alloc_node_[tensor_index] = i;
nodes_to_tensors_[i].insert(tensor_index);
if (!preserve_all_tensors_) {
dealloc_node_[tensor_index] = i;
}
}
}
std::vector<int32_t> tensors_allocated;
TF_LITE_ENSURE_STATUS(
CalculateAllocations(first_node, last_node, &tensors_allocated));
bool arena_reallocated = false;
TF_LITE_ENSURE_STATUS(Commit(&arena_reallocated));
TfLiteTensor* tensors = graph_info_->tensors();
if (arena_reallocated) {
for (int i = 0; i < static_cast<int>(num_tensors); ++i) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
}
} else {
for (int i = 0; i < static_cast<int>(tensors_allocated.size()); ++i) {
TF_LITE_ENSURE_STATUS(
ResolveTensorAllocation(tensors_allocated[i], tensors));
}
}
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::ReleaseNonPersistentMemory() {
TF_LITE_ENSURE_STATUS(arena_.ReleaseBuffer());
has_nonpersistent_memory_ = false;
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
tensor.data.raw = nullptr;
}
}
return kTfLiteOk;
}
TfLiteStatus ArenaPlanner::AcquireNonPersistentMemory() {
bool reallocated;
TF_LITE_ENSURE_STATUS(arena_.Commit(&reallocated));
has_nonpersistent_memory_ = true;
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
}
}
return kTfLiteOk;
}
bool ArenaPlanner::HasNonPersistentMemory() {
return has_nonpersistent_memory_;
}
void ArenaPlanner::DumpDebugInfo(const std::vector<int>& execution_plan) const {
arena_.DumpDebugInfo("kTfLiteArenaRw Dump:", execution_plan);
persistent_arena_.DumpDebugInfo("kTfLiteArenaRwPersistent Dump:",
execution_plan);
}
void ArenaPlanner::GetAllocInfo(size_t* arena_size,
size_t* arena_persist_size) const {
*arena_size = arena_.GetBufferSize();
*arena_persist_size = persistent_arena_.GetBufferSize();
}
TfLiteStatus ArenaPlanner::Commit(bool* reallocated) {
bool arena_reallocated, persistent_arena_reallocated;
TF_LITE_ENSURE_STATUS(arena_.Commit(&arena_reallocated));
has_nonpersistent_memory_ = true;
TF_LITE_ENSURE_STATUS(
persistent_arena_.Commit(&persistent_arena_reallocated));
*reallocated = arena_reallocated;
*reallocated |= persistent_arena_reallocated;
return kTfLiteOk;
}
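// Sorts the tensors to allocate: tensors that live for the whole plan come
// first, then the rest by decreasing size, breaking ties by earliest
// allocation node (a greedy-by-size packing heuristic).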
void ArenaPlanner::CreateTensorAllocationVector(
std::vector<int32_t>* tensors_to_allocate) {
const TfLiteTensor* tensors = this->graph_info_->tensors();
auto tensor_compare = [&](int idx1, int idx2) {
if (alloc_node_[idx1] == 0 && dealloc_node_[idx1] == kNodeNotAssigned) {
if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
return idx1 < idx2;
}
return true;
}
if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
return false;
}
auto size1 = tensors[idx1].bytes;
auto size2 = tensors[idx2].bytes;
if (size1 != size2) {
return size1 > size2;
}
return alloc_node_[idx1] < alloc_node_[idx2];
};
std::sort(tensors_to_allocate->begin(), tensors_to_allocate->end(),
tensor_compare);
}
std::vector<int32_t> ArenaPlanner::GetTensorsToAllocate(int first_node,
int last_node) {
int num_tensors = static_cast<int>(graph_info_->num_tensors());
std::vector<int32_t> tensors_to_allocate;
tensors_to_allocate.reserve(num_tensors);
for (int i = first_node; i <= last_node; ++i) {
tensors_to_allocate.insert(tensors_to_allocate.end(),
nodes_to_tensors_[i].begin(),
nodes_to_tensors_[i].end());
}
return tensors_to_allocate;
}
TfLiteStatus ArenaPlanner::CalculateAllocations(
int first_node, int last_node, std::vector<int32_t>* tensors_allocated) {
const std::vector<int32_t> tensors_to_allocate =
GetTensorsToAllocate(first_node, last_node);
tensors_allocated->reserve(tensors_to_allocate.size());
TfLiteTensor* tensors = graph_info_->tensors();
for (const auto& tensor_index : tensors_to_allocate) {
TfLiteTensor& tensor = tensors[tensor_index];
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size < tensor.bytes) {
tensors_allocated->push_back(tensor_index);
}
} else if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
tensors_allocated->push_back(tensor_index);
}
}
if (tensors_allocated->empty()) {
last_active_node_ = last_node;
return kTfLiteOk;
}
if (first_node < last_active_node_) {
arena_.ResetAllocs();
last_active_node_ = first_node;
} else {
arena_.PurgeActiveAllocs(first_node);
}
CreateTensorAllocationVector(tensors_allocated);
for (const auto& tensor_index : *tensors_allocated) {
TfLiteTensor& tensor = tensors[tensor_index];
auto it = actual_tensor_id_.find(tensor_index);
if (it != actual_tensor_id_.end()) {
TfLiteAllocationType allocation_type =
tensors[it->second].allocation_type;
if (allocation_type != kTfLiteArenaRw ||
tensors[it->second].bytes != tensors[it->first].bytes) {
actual_tensor_id_.erase(it);
} else {
continue;
}
}
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(
arena_.Allocate(context_, tensor_alignment_, tensor.bytes,
tensor_index, alloc_node_[tensor_index],
dealloc_node_[tensor_index], &allocs_[tensor_index]));
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
allocs_[tensor_index].size == 0) {
if (allocs_[tensor_index].size < tensor.bytes) {
TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
context_, tensor_alignment_, tensor.bytes, tensor_index,
alloc_node_[tensor_index],
std::numeric_limits<int32_t>::max(),
&allocs_[tensor_index]));
}
}
}
last_active_node_ = last_node;
return kTfLiteOk;
}
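// Two tensors can share a buffer only if both live in the same arena, i.e.
// both are kTfLiteArenaRw or both are kTfLiteArenaRwPersistent.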
bool AreTensorsAllocatedInSameArena(int32_t root_tensor_index,
int32_t tensor_index,
const TfLiteTensor* tensors) {
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRw &&
tensors[tensor_index].allocation_type == kTfLiteArenaRw) {
return true;
}
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRwPersistent &&
tensors[tensor_index].allocation_type == kTfLiteArenaRwPersistent) {
return true;
}
return false;
}
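// Sets the tensor's data pointer from its arena allocation; a tensor that
// shares a buffer is resolved through its root tensor when both are in the
// same arena.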
TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int32_t tensor_index,
TfLiteTensor* tensors) {
auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
TfLiteTensor& tensor = tensors[tensor_index];
int32_t root_tensor_index = actual_tensor_it == actual_tensor_id_.end()
? tensor_index
: actual_tensor_it->second;
const TfLiteTensor& root_tensor = tensors[root_tensor_index];
if (root_tensor_index != tensor_index) {
if (AreTensorsAllocatedInSameArena(root_tensor_index, tensor_index,
tensors)) {
ResolveTensorAllocation(root_tensor_index, tensors);
tensor.data.data = root_tensor.data.data;
return kTfLiteOk;
}
}
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size != 0) {
return arena_.ResolveAlloc(context_, allocs_[tensor_index],
&tensor.data.raw);
}
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
return persistent_arena_.ResolveAlloc(context_, allocs_[tensor_index],
&tensor.data.raw);
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/arena_planner.h"
#include <algorithm>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
int gNumAlloc = 0;
void OnTfLiteArenaAlloc(int subgraph_index, int arena_id, size_t num_bytes) {
gNumAlloc++;
}
int gNumDealloc = 0;
void OnTfLiteArenaDealloc(int subgraph_index, int arena_id, size_t num_bytes) {
gNumDealloc++;
}
namespace {
constexpr const int kTensorAlignment = 4;
class TestOp {
public:
TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
std::initializer_list<int> temporaries,
int builtin_code = kTfLiteBuiltinAdd,
int inplace_operator = kTfLiteInplaceOpInput0Shared)
: inputs_(inputs),
outputs_(outputs),
temporaries_(temporaries),
registration_{} {
registration_.builtin_code = builtin_code;
registration_.inplace_operator = inplace_operator;
}
const std::vector<int>& inputs() const { return inputs_; }
const std::vector<int>& outputs() const { return outputs_; }
const std::vector<int>& temporaries() const { return temporaries_; }
const TfLiteRegistration& registration() const { return registration_; }
private:
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> temporaries_;
TfLiteRegistration registration_;
};
class TestGraph {
public:
TestGraph(std::initializer_list<int> inputs,
std::initializer_list<TestOp> nodes,
std::initializer_list<int> outputs)
: inputs_(inputs), outputs_(outputs) {
int max_tensor_index = 0;
for (int t : inputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (int t : outputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (const auto& node : nodes) {
auto int_array = [](const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
};
registrations_.push_back(node.registration());
nodes_.push_back(TfLiteNode());
nodes_.back().inputs = int_array(node.inputs());
for (int t : node.inputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().outputs = int_array(node.outputs());
for (int t : node.outputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().temporaries = int_array(node.temporaries());
for (int t : node.temporaries()) {
max_tensor_index = std::max(max_tensor_index, t);
}
}
for (int i = 0; i <= max_tensor_index; ++i) {
tensors_.push_back(TfLiteTensor());
tensors_.back().allocation_type = kTfLiteArenaRw;
tensors_.back().bytes = (i + 1) * 3;
}
}
~TestGraph() {
for (auto node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
TfLiteIntArrayFree(node.temporaries);
}
}
const std::vector<TfLiteNode>& nodes() { return nodes_; }
std::vector<TfLiteTensor>* tensors() { return &tensors_; }
const std::vector<int>& inputs() { return inputs_; }
const std::vector<int>& outputs() { return outputs_; }
const std::vector<int>& variables() { return variables_; }
const std::vector<TfLiteRegistration>& registrations() {
return registrations_;
}
void SetVariables(const std::vector<int>& variables) {
variables_ = variables;
}
void Swap(TestGraph* other) {
std::swap(nodes_, other->nodes_);
std::swap(tensors_, other->tensors_);
std::swap(inputs_, other->inputs_);
std::swap(outputs_, other->outputs_);
std::swap(variables_, other->variables_);
}
private:
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<TfLiteRegistration> registrations_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
};
class TestGraphInfo : public GraphInfo {
public:
explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
size_t num_tensors() const override { return graph_->tensors()->size(); }
TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
TfLiteTensor* tensor(size_t index) override {
return &graph_->tensors()->at(index);
}
size_t num_execution_nodes() const override { return graph_->nodes().size(); }
size_t num_total_nodes() const override { return graph_->nodes().size(); }
const TfLiteNode& node(size_t index) const override {
return graph_->nodes()[index];
}
const TfLiteRegistration& registration(size_t index) const override {
return graph_->registrations()[index];
}
size_t node_index(size_t index) const override { return index; }
const std::vector<int>& inputs() const override { return graph_->inputs(); }
const std::vector<int>& outputs() const override { return graph_->outputs(); }
const std::vector<int>& variables() const override {
return graph_->variables();
}
private:
TestGraph* graph_;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
LOG(INFO) << temp_buffer;
}
class ArenaPlannerTest : public ::testing::Test {
protected:
void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_ = std::make_unique<ArenaPlanner>(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
preserve_all_tensors, kTensorAlignment);
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void SwapGraph(TestGraph* graph) {
graph_->Swap(graph);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void Execute(int start, int end) {
CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
}
void ReleaseNonPersistentMemory() {
CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
}
void AcquireNonPersistentMemory() {
CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
}
void ResetAllocations() { CHECK(planner_->ResetAllocations() == kTfLiteOk); }
void ResetAllocationsAfter(int node) {
CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
}
bool HasNonPersistentMemory() {
return planner_ && planner_->HasNonPersistentMemory();
}
void Destroy() { planner_.reset(); }
std::ptrdiff_t GetOffset(int tensor_index) {
const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
return reinterpret_cast<std::intptr_t>(tensor.data.raw) -
planner_->BasePointer(tensor.allocation_type);
}
std::ptrdiff_t GetOffsetAfter(int tensor_index) {
const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
std::ptrdiff_t offset = GetOffset(tensor_index) + tensor.bytes;
if (offset % kTensorAlignment != 0) {
offset += kTensorAlignment - offset % kTensorAlignment;
}
return offset;
}
bool IsUnallocated(int tensor_index) {
return (*graph_->tensors())[tensor_index].data.raw == nullptr;
}
TfLiteContext context_;
TestGraph* graph_;
std::unique_ptr<ArenaPlanner> planner_;
};
TEST_F(ArenaPlannerTest, EmptyGraph) {
TestGraph graph({}, {}, {});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
}
TEST_F(ArenaPlannerTest, GraphWithOneOp) {
TestGraph graph({0, 10}, {{{0}, {}, {}}, {{10}, {}, {}}}, {5, 11});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(10), GetOffsetAfter(0));
EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr);
EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr);
}
TEST_F(ArenaPlannerTest, GraphWithOneOp2) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), 8);
EXPECT_EQ(GetOffsetAfter(2), 20);
}
TEST_F(ArenaPlannerTest, ZeroSizedTensors) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
(*graph.tensors())[1].bytes = 0;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ((*graph_->tensors())[1].data.raw, nullptr);
}
TEST_F(ArenaPlannerTest, SimpleGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, AllocsCorrectlyReset) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
ResetAllocations();
std::vector<TfLiteTensor>& tensors = *graph.tensors();
tensors[0].bytes += 1;
tensors[1].bytes += 1;
tensors[2].bytes += 1;
tensors[3].bytes += 1;
tensors[4].bytes += 1;
tensors[5].bytes += 1;
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, SimpleGraphInputsPreserved) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(1), 4);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithInplaceReshape) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{1}, {3}, {}},
{{2, 3},
{4},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{4}, {5}, {}}
},
{5});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[4].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithChainOfInplaceOps) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 3},
{4},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{4, 3},
{5},
{},
kTfLiteBuiltinExpandDims,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{5, 3},
{6},
{},
kTfLiteBuiltinSqueeze,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{6, 3},
{7},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{7}, {8}, {}},
},
{8});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[4].bytes = 24;
(*graph.tensors())[5].bytes = 24;
(*graph.tensors())[6].bytes = 24;
(*graph.tensors())[7].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(2));
EXPECT_EQ(GetOffset(2), GetOffset(4));
EXPECT_EQ(GetOffset(2), GetOffset(5));
EXPECT_EQ(GetOffset(2), GetOffset(6));
EXPECT_EQ(GetOffset(2), GetOffset(7));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputOutput) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 1},
{3},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
{{3}, {4}, {}}},
{4});
(*graph.tensors())[2].bytes = 24;
(*graph.tensors())[3].bytes = 24;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(2), GetOffset(3));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputTensor) {
TestGraph graph({0, 1},
{
{{0, 1},
{2},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared |
kTfLiteInplaceOpDataUnmodified},
{{4}, {3}, {}}},
{3});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(0), GetOffset(2));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeOutputTensor) {
TestGraph graph(
{0, 1},
{
{{0}, {2}, {}},
{{2, 1},
{3},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpInput0Shared |
kTfLiteInplaceOpDataUnmodified},
},
{3});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(2), GetOffset(3));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeDynamicInput) {
TestGraph graph({0, 1},
{
{{0, 1},
{2},
{},
kTfLiteBuiltinReshape,
kTfLiteInplaceOpDataUnmodified}
},
{2});
SetGraph(&graph);
(*graph.tensors())[0].allocation_type = kTfLiteDynamic;
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(0), GetOffset(2));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddInPlace) {
TestGraph graph(
{0, 1},
{
{{0, 1}, {3}, {}},
{{1, 2}, {4}, {}},
{{3, 4},
{5},
{},
kTfLiteBuiltinAdd,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared},
{{5}, {6}, {}},
},
{6});
(*graph.tensors())[3].bytes = 8;
(*graph.tensors())[4].bytes = 16;
(*graph.tensors())[5].bytes = 16;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(3), GetOffset(5));
EXPECT_EQ(GetOffset(4), GetOffset(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddNotInPlace) {
TestGraph graph(
{0, 1},
{
{{0, 1}, {3}, {}},
{{1, 2}, {4}, {}},
{{3, 4}, {5}, {}, kTfLiteBuiltinAdd, kTfLiteInplaceOpInput0Shared},
{{5}, {6}, {}},
},
{6});
(*graph.tensors())[3].bytes = 8;
(*graph.tensors())[4].bytes = 8;
(*graph.tensors())[5].bytes = 64;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE(GetOffset(3), GetOffset(5));
EXPECT_NE(GetOffset(4), GetOffset(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
ResetAllocationsAfter(0);
EXPECT_FALSE(IsUnallocated(0));
EXPECT_FALSE(IsUnallocated(1));
EXPECT_FALSE(IsUnallocated(2));
EXPECT_TRUE(IsUnallocated(3));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_TRUE(IsUnallocated(5));
(*graph.tensors())[4].bytes += 64;
Execute(1, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(2), 48);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
(*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
void* tensor5_ptr = (*graph.tensors())[5].data.raw;
ResetAllocationsAfter(0);
EXPECT_FALSE(IsUnallocated(0));
EXPECT_FALSE(IsUnallocated(1));
EXPECT_FALSE(IsUnallocated(2));
EXPECT_TRUE(IsUnallocated(3));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_FALSE(IsUnallocated(5));
Execute(0, graph.nodes().size() - 1);
EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionals) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, -1, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionalOutput) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{-1, 3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 12);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithLargeTensor) {
TestGraph graph({0, -1},
{
{{0}, {1}, {}},
{{1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].bytes = 40;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(1), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(1));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
}
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentTensor) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].allocation_type = kTfLiteArenaRwPersistent;
graph.SetVariables({1});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_NE((*graph.tensors())[0].data.raw, (*graph.tensors())[1].data.raw);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), 0);
}
TEST_F(ArenaPlannerTest, SimpleGraphWithDynamicTensor) {
TestGraph graph({0, -1, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4, -1}, {3}, {}}
},
{3});
(*graph.tensors())[1].allocation_type = kTfLiteDynamic;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ((*graph.tensors())[1].data.raw, nullptr);
EXPECT_EQ(GetOffset(5), 4);
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(3), 4);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
TEST_F(ArenaPlannerTest, LargerGraphAndStepwiseAllocation) {
TestGraph graph({0, 1},
{
{{0, 1}, {2, 3}, {}},
{{2, 0}, {4, 5}, {6}},
{{1, -1}, {7}, {}},
{{7, 3}, {8}, {9}},
{{4, 5, 8}, {10}, {}},
},
{10});
SetGraph(&graph);
Execute(0, 0);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_TRUE(IsUnallocated(6));
EXPECT_TRUE(IsUnallocated(4));
EXPECT_TRUE(IsUnallocated(5));
EXPECT_TRUE(IsUnallocated(7));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(1, 1);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_TRUE(IsUnallocated(7));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(2, 2);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_TRUE(IsUnallocated(9));
EXPECT_TRUE(IsUnallocated(8));
EXPECT_TRUE(IsUnallocated(10));
Execute(3, 3);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
EXPECT_TRUE(IsUnallocated(10));
Execute(4, 4);
EXPECT_EQ(GetOffset(3), 12);
EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
EXPECT_EQ(GetOffset(10), 12);
}
TEST_F(ArenaPlannerTest, ModifiedGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
TestGraph pruned_graph({0, 1},
{
{{0, 1}, {3}, {}},
},
{3});
SwapGraph(&pruned_graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
TEST_F(ArenaPlannerTest, ModifiedGraph_DeallocateNonPersistentArena) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
AcquireNonPersistentMemory();
AcquireNonPersistentMemory();
EXPECT_TRUE(HasNonPersistentMemory());
ReleaseNonPersistentMemory();
EXPECT_FALSE(HasNonPersistentMemory());
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), 0);
EXPECT_EQ(GetOffset(3), 0);
TestGraph pruned_graph({0, 1},
{
{{0, 1}, {3}, {}},
},
{3});
SwapGraph(&pruned_graph);
Execute(0, graph.nodes().size() - 1);
AcquireNonPersistentMemory();
EXPECT_TRUE(HasNonPersistentMemory());
ReleaseNonPersistentMemory();
AcquireNonPersistentMemory();
EXPECT_EQ(GetOffset(0), 0);
EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
TEST_F(ArenaPlannerTest, ComplexGraph) {
TestGraph graph({0},
{
{{0}, {1}, {}},
{{1}, {2}, {}},
{{1}, {3}, {}},
{{1}, {4}, {}},
{{2, 3, 4}, {5}, {}},
{{5}, {6}, {}},
{{5}, {7}, {}},
{{6, 7}, {8}, {}},
},
{8});
(*graph.tensors())[0].bytes = 32;
(*graph.tensors())[1].bytes = 28;
(*graph.tensors())[2].bytes = 36;
(*graph.tensors())[3].bytes = 16;
(*graph.tensors())[4].bytes = 8;
(*graph.tensors())[5].bytes = 64;
(*graph.tensors())[6].bytes = 10;
(*graph.tensors())[7].bytes = 40;
SetGraph(&graph);
Execute(0, graph.nodes().size() - 1);
EXPECT_EQ(GetOffset(5), 32);
EXPECT_EQ(GetOffset(7), |
714 | cpp | tensorflow/tensorflow | util | third_party/xla/xla/mlir/tools/mlir_interpreter/dialects/util.cc | third_party/xla/xla/util_test.cc | #ifndef XLA_MLIR_TOOLS_MLIR_INTERPRETER_DIALECTS_UTIL_H_
#define XLA_MLIR_TOOLS_MLIR_INTERPRETER_DIALECTS_UTIL_H_
#include <cstdint>
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
namespace mlir {
namespace interpreter {
struct OffsetsSizesStrides {
llvm::SmallVector<int64_t> offsets;
llvm::SmallVector<int64_t> sizes;
llvm::SmallVector<int64_t> strides;
};
SmallVector<int64_t> ReplaceDynamicVals(ArrayRef<int64_t> static_vals,
ArrayRef<InterpreterValue>& args);
SmallVector<int64_t> ReplaceDynamicVals(ArrayRef<int64_t> static_vals,
ArrayRef<int64_t> dynamic_vals);
OffsetsSizesStrides ExtractOffsetsSizesStrides(
ArrayRef<InterpreterValue> args, OffsetSizeAndStrideOpInterface op);
OffsetsSizesStrides ExtractOffsetsSizesStrides(
ArrayRef<int64_t> dynamic_offsets, ArrayRef<int64_t> dynamic_sizes,
ArrayRef<int64_t> dynamic_strides, OffsetSizeAndStrideOpInterface op);
InterpreterValue ReshapeTensor(const InterpreterValue& in,
ArrayRef<int64_t> shape);
InterpreterValue GetInitOperand(mlir::Operation* op, int64_t index,
MutableArrayRef<InterpreterValue> args);
InterpreterValue GetInitOperand(mlir::ValueRange values, int64_t index,
ArrayRef<InterpreterValue> args);
InterpreterValue TransposeImpl(const InterpreterValue& in,
ArrayRef<int64_t> permutation);
int64_t DimImpl(const InterpreterValue& in, int64_t index,
InterpreterState& state);
llvm::SmallVector<InterpreterValue> NoOpTerminator(
MutableArrayRef<InterpreterValue> args, mlir::Operation*,
InterpreterState&);
int64_t EvalAffineExpr(AffineExpr expr, ArrayRef<int64_t> dims,
ArrayRef<int64_t> symbols);
llvm::SmallVector<int64_t> EvalAffineMap(AffineMap map, ArrayRef<int64_t> dims,
ArrayRef<int64_t> symbols);
llvm::SmallVector<int64_t> EvalAffineMap(AffineMap map,
ArrayRef<int64_t> operands);
}
}
#endif
#include "xla/mlir/tools/mlir_interpreter/dialects/util.h"
#include <cassert>
#include <cstdint>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
namespace mlir {
namespace interpreter {
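// Expands a list of static values, consuming one scalar operand from `args`
// for every dynamic placeholder.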
SmallVector<int64_t> ReplaceDynamicVals(llvm::ArrayRef<int64_t> static_vals,
ArrayRef<InterpreterValue>& args) {
llvm::SmallVector<int64_t> out;
for (int64_t val : static_vals) {
if (ShapedType::isDynamic(val)) {
out.push_back(std::get<int64_t>(args.front().storage));
args = args.drop_front(1);
} else {
out.push_back(val);
}
}
return out;
}
SmallVector<int64_t> ReplaceDynamicVals(ArrayRef<int64_t> static_vals,
ArrayRef<int64_t> dynamic_vals) {
llvm::SmallVector<int64_t> out;
for (int64_t val : static_vals) {
if (ShapedType::isDynamic(val)) {
out.push_back(dynamic_vals.front());
dynamic_vals = dynamic_vals.drop_front(1);
} else {
out.push_back(val);
}
}
assert(dynamic_vals.empty() && "expected no leftover dynamic values");
return out;
}
OffsetsSizesStrides ExtractOffsetsSizesStrides(
ArrayRef<InterpreterValue> args, OffsetSizeAndStrideOpInterface op) {
auto offsets = ReplaceDynamicVals(op.getStaticOffsets(), args);
auto sizes = ReplaceDynamicVals(op.getStaticSizes(), args);
auto strides = ReplaceDynamicVals(op.getStaticStrides(), args);
return {offsets, sizes, strides};
}
OffsetsSizesStrides ExtractOffsetsSizesStrides(
ArrayRef<int64_t> dynamic_offsets, ArrayRef<int64_t> dynamic_sizes,
ArrayRef<int64_t> dynamic_strides, OffsetSizeAndStrideOpInterface op) {
auto offsets = ReplaceDynamicVals(op.getStaticOffsets(), dynamic_offsets);
auto sizes = ReplaceDynamicVals(op.getStaticSizes(), dynamic_sizes);
auto strides = ReplaceDynamicVals(op.getStaticStrides(), dynamic_strides);
return {offsets, sizes, strides};
}
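// Copies the elements of `in` into a freshly allocated buffer with the
// requested shape, walking both views in logical index order.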
InterpreterValue ReshapeTensor(const InterpreterValue& in,
ArrayRef<int64_t> shape) {
auto out = in.TypedAlike(shape);
for (const auto& [in_index, out_index] :
llvm::zip(in.View().Indices(), out.View().Indices())) {
out.InsertElement(out_index, in.ExtractElement(in_index));
}
return out;
}
InterpreterValue GetInitOperand(mlir::Operation* op, int64_t index,
MutableArrayRef<InterpreterValue> args) {
return GetInitOperand(op->getOperands(), index, args);
}
InterpreterValue GetInitOperand(mlir::ValueRange values, int64_t index,
ArrayRef<InterpreterValue> args) {
assert(args.size() == values.size() && "expected matching sizes");
return mlir::isa<TensorType>(values[index].getType()) ? args[index].Clone()
: args[index];
}
InterpreterValue TransposeImpl(const InterpreterValue& in,
ArrayRef<int64_t> permutation) {
auto out = in;
auto& view = out.View();
view.sizes.clear();
view.strides.clear();
for (int64_t p : permutation) {
view.sizes.push_back(in.View().sizes[p]);
view.strides.push_back(in.View().strides[p]);
}
return out;
}
int64_t DimImpl(const InterpreterValue& in, int64_t index,
InterpreterState& state) {
if (index < 0 || index >= in.View().Rank()) {
state.AddFailure("dimension index out of bounds");
return 0;
}
return in.View().sizes[index];
}
llvm::SmallVector<InterpreterValue> NoOpTerminator(
MutableArrayRef<InterpreterValue> args, mlir::Operation*,
InterpreterState&) {
return llvm::to_vector(args);
}
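// Recursively evaluates an affine expression against the given dimension and
// symbol values.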
int64_t EvalAffineExpr(AffineExpr expr, ArrayRef<int64_t> dims,
ArrayRef<int64_t> symbols) {
int64_t lhs = 0, rhs = 0;
if (auto bin = expr.dyn_cast<AffineBinaryOpExpr>()) {
lhs = EvalAffineExpr(bin.getLHS(), dims, symbols);
rhs = EvalAffineExpr(bin.getRHS(), dims, symbols);
}
switch (expr.getKind()) {
case AffineExprKind::Add:
return lhs + rhs;
case AffineExprKind::Mul:
return lhs * rhs;
case AffineExprKind::Mod:
return llvm::mod(lhs, rhs);
case AffineExprKind::FloorDiv:
return llvm::divideFloorSigned(lhs, rhs);
case AffineExprKind::CeilDiv:
return llvm::divideCeilSigned(lhs, rhs);
case AffineExprKind::Constant:
return expr.cast<AffineConstantExpr>().getValue();
case AffineExprKind::DimId:
return dims[expr.cast<AffineDimExpr>().getPosition()];
case AffineExprKind::SymbolId:
return symbols[expr.cast<AffineSymbolExpr>().getPosition()];
}
}
SmallVector<int64_t> EvalAffineMap(AffineMap map, ArrayRef<int64_t> dims,
ArrayRef<int64_t> symbols) {
SmallVector<int64_t> result;
for (auto expr : map.getResults()) {
result.push_back(EvalAffineExpr(expr, dims, symbols));
}
return result;
}
llvm::SmallVector<int64_t> EvalAffineMap(AffineMap map,
ArrayRef<int64_t> operands) {
return EvalAffineMap(map, operands.take_front(map.getNumDims()),
operands.drop_front(map.getNumDims()));
}
}
} | #include "xla/util.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <set>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "xla/maybe_owning.h"
#include "xla/test.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
TEST(UtilTest, ReindentsDifferentNumberOfLeadingSpacesUniformly) {
std::string original = R"( hello there
world)";
std::string got = Reindent(original, " ");
std::string want = R"( hello there
world)";
EXPECT_EQ(want, got);
}
TEST(UtilTest, HumanReadableNumFlopsExample) {
ASSERT_EQ("1.00GFLOP/s", HumanReadableNumFlops(1e9, 1e9));
}
TEST(UtilTest, CommaSeparatedString) {
EXPECT_EQ(CommaSeparatedString({}), "");
EXPECT_EQ(CommaSeparatedString({"hello world"}), "hello world");
EXPECT_EQ(CommaSeparatedString({1, 57, 2}, "foo", "bar"), "foo1, 57, 2bar");
}
TEST(UtilTest, VectorString) {
std::list<int64_t> empty_list;
EXPECT_EQ(VectorString(empty_list), "()");
std::vector<float> float_vector = {5.5};
EXPECT_EQ(VectorString(float_vector), "(5.5)");
std::set<std::string_view> string_set = {std::string_view("a"),
std::string_view("b")};
EXPECT_EQ(VectorString(string_set), "(a, b)");
EXPECT_EQ(VectorString({}), "()");
EXPECT_EQ(VectorString({1, 57, 2}), "(1, 57, 2)");
}
TEST(UtilTest, LogLines) {
LogLines(tsl::INFO, "hello\n\nworld", __FILE__, __LINE__);
}
TEST(UtilTest, CommonFactors) {
struct {
std::vector<int64_t> a, b;
absl::InlinedVector<std::pair<int64_t, int64_t>, 8> expected;
} test_cases[] = {
{{0}, {0}, {{0, 0}, {1, 1}}},
{{1}, {}, {{0, 0}, {1, 0}}},
{{}, {1}, {{0, 0}, {0, 1}}},
{{0, 10}, {0, 10, 3}, {{0, 0}, {2, 3}}},
{{1, 0}, {1, 0, 1},
{{0, 0}, {1, 1}, {2, 2}, {2, 3}}},
{{0, 1}, {0, 1}, {{0, 0}, {1, 1}, {2, 2}}},
{{}, {}, {{0, 0}}},
{{2, 5, 1, 3},
{1, 10, 3, 1},
{{0, 0}, {0, 1}, {2, 2}, {3, 2}, {4, 3}, {4, 4}}},
{{1, 1, 3},
{1, 1, 3},
{{0, 0}, {1, 1}, {2, 2}, {3, 3}}},
{{2, 6},
{4, 3},
{{0, 0}, {2, 2}}},
{{1, 2, 6},
{4, 1, 3, 1},
{{0, 0}, {1, 0}, {3, 3}, {3, 4}}},
{{1, 2, 1},
{1, 1, 1, 2},
{{0, 0}, {1, 1}, {1, 2}, {1, 3}, {2, 4}, {3, 4}}}};
for (const auto& test_case : test_cases) {
EXPECT_EQ(test_case.expected, CommonFactors(test_case.a, test_case.b));
}
}
TEST(UtilTest, SanitizeFileName) {
EXPECT_EQ(SanitizeFileName(""), "");
EXPECT_EQ(SanitizeFileName("abc"), "abc");
EXPECT_EQ(SanitizeFileName("/\\[]"), "____");
EXPECT_EQ(SanitizeFileName("/A\\B[C]"), "_A_B_C_");
}
TEST(UtilTest, RoundTripFpToString) {
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<tsl::float8_e5m2>(
false, QuietNanWithoutPayload<tsl::float8_e5m2>())),
"nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<tsl::float8_e5m2>(
true, QuietNanWithoutPayload<tsl::float8_e5m2>())),
"-nan");
EXPECT_EQ(
RoundTripFpToString(std::numeric_limits<tsl::float8_e4m3fn>::quiet_NaN()),
"nan");
EXPECT_EQ(RoundTripFpToString(
-std::numeric_limits<tsl::float8_e4m3fn>::quiet_NaN()),
"-nan");
EXPECT_EQ(RoundTripFpToString(
std::numeric_limits<tsl::float8_e4m3b11fnuz>::quiet_NaN()),
"-nan");
EXPECT_EQ(RoundTripFpToString(
std::numeric_limits<tsl::float8_e4m3fnuz>::quiet_NaN()),
"-nan");
EXPECT_EQ(RoundTripFpToString(
std::numeric_limits<tsl::float8_e5m2fnuz>::quiet_NaN()),
"-nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<half>(
false, QuietNanWithoutPayload<half>())),
"nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<half>(
true, QuietNanWithoutPayload<half>())),
"-nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<bfloat16>(
false, QuietNanWithoutPayload<bfloat16>())),
"nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<bfloat16>(
true, QuietNanWithoutPayload<bfloat16>())),
"-nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<float>(
false, QuietNanWithoutPayload<float>())),
"nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<float>(
true, QuietNanWithoutPayload<float>())),
"-nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<double>(
false, QuietNanWithoutPayload<double>())),
"nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<double>(
true, QuietNanWithoutPayload<double>())),
"-nan");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<half>(false, 0x1)),
"nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<half>(true, 0x1)),
"-nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<bfloat16>(false, 0x1)),
"nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<bfloat16>(true, 0x1)),
"-nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<float>(false, 0x1)),
"nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<float>(true, 0x1)),
"-nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<double>(false, 0x1)),
"nan(0x1)");
EXPECT_EQ(RoundTripFpToString(NanWithSignAndPayload<double>(true, 0x1)),
"-nan(0x1)");
}
namespace {
template <typename T>
void TotalOrderHelper(T x, T y) {
auto x_sm = ToSignMagnitude(x);
bool x_sign = static_cast<bool>(Eigen::numext::signbit(x));
bool y_sign = static_cast<bool>(Eigen::numext::signbit(y));
auto y_sm = ToSignMagnitude(y);
if (x_sign && !y_sign) {
EXPECT_LT(x_sm, y_sm) << x << " " << y;
}
if (!x_sign && y_sign) {
EXPECT_GT(x_sm, y_sm) << x << " " << y;
}
if (x == y && x_sign == y_sign) {
EXPECT_EQ(x_sm, y_sm) << x << " " << y;
}
if (x < y) {
EXPECT_LT(x_sm, y_sm) << x << " " << y;
}
if (x > y) {
EXPECT_GT(x_sm, y_sm) << x << " " << y;
}
if (Eigen::numext::isnan(x) && x_sign && !Eigen::numext::isnan(y)) {
EXPECT_LT(x_sm, y_sm) << x << " " << y;
}
if (Eigen::numext::isnan(x) && !x_sign && !Eigen::numext::isnan(y)) {
EXPECT_GT(x_sm, y_sm) << x << " " << y;
}
if (Eigen::numext::isnan(y) && y_sign && !Eigen::numext::isnan(x)) {
EXPECT_GT(x_sm, y_sm) << x << " " << y;
}
if (Eigen::numext::isnan(y) && !y_sign && !Eigen::numext::isnan(x)) {
EXPECT_LT(x_sm, y_sm) << x << " " << y;
}
}
}
TEST(UtilTest, TotalOrder_F8E5M2) {
for (int a = 0; a < 256; ++a) {
tsl::float8_e5m2 x =
Eigen::numext::bit_cast<tsl::float8_e5m2>(static_cast<uint8_t>(a));
for (int b = 0; b < 256; ++b) {
tsl::float8_e5m2 y =
Eigen::numext::bit_cast<tsl::float8_e5m2>(static_cast<uint8_t>(b));
TotalOrderHelper(x, y);
}
}
}
TEST(UtilTest, TotalOrder_F8E4M3FN) {
for (int a = 0; a < 256; ++a) {
tsl::float8_e4m3fn x =
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(a));
for (int b = 0; b < 256; ++b) {
tsl::float8_e4m3fn y =
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(b));
TotalOrderHelper(x, y);
}
}
}
TEST(UtilTest, TotalOrder_F8E4M3B11) {
for (int a = 0; a < 256; ++a) {
tsl::float8_e4m3b11fnuz x =
Eigen::numext::bit_cast<tsl::float8_e4m3b11fnuz>(
static_cast<uint8_t>(a));
for (int b = 0; b < 256; ++b) {
tsl::float8_e4m3b11fnuz y =
Eigen::numext::bit_cast<tsl::float8_e4m3b11fnuz>(
static_cast<uint8_t>(b));
TotalOrderHelper(x, y);
}
}
}
TEST(UtilTest, TotalOrder_F8E4M3FNUZ) {
for (int a = 0; a < 256; ++a) {
tsl::float8_e4m3fnuz x =
Eigen::numext::bit_cast<tsl::float8_e4m3fnuz>(static_cast<uint8_t>(a));
for (int b = 0; b < 256; ++b) {
tsl::float8_e4m3fnuz y = Eigen::numext::bit_cast<tsl::float8_e4m3fnuz>(
static_cast<uint8_t>(b));
TotalOrderHelper(x, y);
}
}
}
TEST(UtilTest, TotalOrder_F8E5M2FNUZ) {
for (int a = 0; a < 256; ++a) {
tsl::float8_e5m2fnuz x =
Eigen::numext::bit_cast<tsl::float8_e5m2fnuz>(static_cast<uint8_t>(a));
for (int b = 0; b < 256; ++b) {
tsl::float8_e5m2fnuz y = Eigen::numext::bit_cast<tsl::float8_e5m2fnuz>(
static_cast<uint8_t>(b));
TotalOrderHelper(x, y);
}
}
}
void PackInt4(absl::Span<const char> input, absl::Span<char> output) {
CHECK_EQ(output.size(), CeilOfRatio(input.size(), size_t{2}));
for (size_t i = 0; i < input.size(); ++i) {
char val = input[i] & 0xf;
if (i % 2 == 0) {
output[i / 2] = val << 4;
} else {
output[i / 2] |= val;
}
}
}
TEST(UtilTest, PackInt4) {
std::vector<char> input(7);
std::iota(input.begin(), input.end(), 0);
std::vector<char> output_ref(CeilOfRatio<int64_t>(input.size(), 2));
PackInt4(input, absl::MakeSpan(output_ref));
std::vector<char> output_dut(CeilOfRatio<int64_t>(input.size(), 2));
PackIntN(4, input, absl::MakeSpan(output_dut));
for (size_t i = 0; i < output_dut.size(); ++i) {
EXPECT_EQ(output_ref[i], output_dut[i]) << i;
}
std::vector<char> unpacked(input.size());
UnpackIntN(4, output_ref, absl::MakeSpan(unpacked));
for (size_t i = 0; i < input.size(); ++i) {
EXPECT_EQ(unpacked[i], input[i]) << i;
}
}
TEST(UtilTest, MaybeOwningTestNull) {
MaybeOwning<char> m(nullptr);
EXPECT_EQ(m.get(), nullptr);
EXPECT_EQ(m.get_mutable(), nullptr);
}
TEST(UtilTest, MaybeOwningTestOwning) {
MaybeOwning<char> m(std::make_unique<char>());
*m.get_mutable() = 'a';
EXPECT_EQ(*m, 'a');
}
TEST(UtilTest, MaybeOwningTestShared) {
auto owner = std::make_unique<char>();
*owner = 'x';
MaybeOwning<char> c1(owner.get());
MaybeOwning<char> c2(owner.get());
EXPECT_EQ(*c1, 'x');
EXPECT_EQ(*c2, 'x');
EXPECT_EQ(c1.get(), c2.get());
}
}
} |
715 | cpp | tensorflow/tensorflow | simple_memory_arena | tensorflow/lite/simple_memory_arena.cc | tensorflow/lite/simple_memory_arena_test.cc | #ifndef TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_
#define TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
struct ArenaAllocWithUsageInterval {
ArenaAllocWithUsageInterval() { reset(); }
size_t offset;
size_t size;
int32_t tensor;
int32_t first_node;
int32_t last_node;
inline void reset() {
offset = 0;
size = 0;
tensor = -1;
first_node = -1;
last_node = -1;
}
inline bool operator<(const ArenaAllocWithUsageInterval& other) const {
return offset < other.offset;
}
};
struct PointerAlignedPointerPair {
char* pointer;
char* aligned_pointer;
};
class ResizableAlignedBuffer {
public:
ResizableAlignedBuffer(size_t alignment, int subgraph_index)
: buffer_{nullptr, nullptr},
data_size_(0),
alignment_(alignment),
subgraph_index_(subgraph_index) {
(void)subgraph_index_;
}
~ResizableAlignedBuffer() { Release(); }
bool Resize(size_t new_size);
void Release();
char* GetPtr() const { return buffer_.aligned_pointer; }
size_t GetSize() const { return data_size_; }
size_t GetAlignment() const { return alignment_; }
private:
ResizableAlignedBuffer(const ResizableAlignedBuffer&) = delete;
ResizableAlignedBuffer& operator=(const ResizableAlignedBuffer&) = delete;
ResizableAlignedBuffer(ResizableAlignedBuffer&&) = delete;
ResizableAlignedBuffer& operator=(ResizableAlignedBuffer&&) = delete;
PointerAlignedPointerPair buffer_;
size_t data_size_;
size_t alignment_;
int subgraph_index_;
};
class SimpleMemoryArena {
public:
explicit SimpleMemoryArena(size_t arena_alignment, int subgraph_index = 0)
: committed_(false),
high_water_mark_(0),
underlying_buffer_(arena_alignment, subgraph_index),
active_allocs_() {}
void ResetAllocs();
void PurgeActiveAllocs(int32_t node);
void PurgeAfter(int32_t node);
void CalculateActiveAllocs(
const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node);
TfLiteStatus Allocate(TfLiteContext* context, size_t alignment, size_t size,
int32_t tensor, int32_t first_node, int32_t last_node,
ArenaAllocWithUsageInterval* new_alloc);
TfLiteStatus Commit(bool* arena_reallocated);
TfLiteStatus ResolveAlloc(TfLiteContext* context,
const ArenaAllocWithUsageInterval& alloc,
char** output_ptr);
TfLiteStatus ClearPlan();
TfLiteStatus ReleaseBuffer();
size_t GetBufferSize() const { return underlying_buffer_.GetSize(); }
std::intptr_t BasePointer() const {
return reinterpret_cast<std::intptr_t>(underlying_buffer_.GetPtr());
}
void DumpDebugInfo(const std::string& name,
const std::vector<int>& execution_plan) const;
private:
bool committed_;
size_t high_water_mark_;
ResizableAlignedBuffer underlying_buffer_;
std::vector<ArenaAllocWithUsageInterval> active_allocs_;
};
}
#endif
#include "tensorflow/lite/simple_memory_arena.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif
#if defined(__ANDROID__)
#define TF_LITE_HAS_ALIGNED_ALLOC (__ANDROID_API__ >= 28)
#elif defined(__APPLE__)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif defined(_WIN32)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif __cplusplus >= 201703L || __STDC_VERSION__ >= 201112L
#define TF_LITE_HAS_ALIGNED_ALLOC 1
#endif
namespace {
template <typename T>
T AlignTo(size_t alignment, T offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
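// Platform-specific helpers that return both the raw pointer (needed for
// freeing) and an address aligned to `alignment`.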
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer);
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment);
#if defined(_WIN32)
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(_aligned_malloc(size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
_aligned_free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(
_aligned_realloc(old_buffer.pointer, new_size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
#else
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
#if TF_LITE_HAS_ALIGNED_ALLOC
const size_t allocation_size = AlignTo(alignment, size + alignment - 1);
char* pointer =
reinterpret_cast<char*>(::aligned_alloc(alignment, allocation_size));
char* aligned_ptr = pointer;
#else
const size_t allocation_size = size + alignment - 1;
char* pointer = reinterpret_cast<char*>(std::malloc(allocation_size));
char* aligned_ptr = reinterpret_cast<char*>(
AlignTo(alignment, reinterpret_cast<std::uintptr_t>(pointer)));
#endif
#if defined(__clang__)
#if __has_feature(memory_sanitizer)
std::memset(pointer, 0, allocation_size);
#endif
#endif
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
std::free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
tflite::PointerAlignedPointerPair new_buffer =
AlignedAlloc(new_size, alignment);
if (new_size > 0 && old_size > 0) {
const size_t copy_amount = std::min(new_size, old_size);
std::memcpy(new_buffer.aligned_pointer, old_buffer.aligned_pointer,
copy_amount);
}
AlignedFree(old_buffer);
return new_buffer;
}
#endif
}
namespace tflite {
bool ResizableAlignedBuffer::Resize(size_t new_size) {
if (new_size <= data_size_) {
return false;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(true);
OnTfLiteArenaAlloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
new_size);
if (data_size_ > 0) {
OnTfLiteArenaDealloc(subgraph_index_,
reinterpret_cast<std::uintptr_t>(this), data_size_);
}
#endif
auto new_buffer = AlignedRealloc(buffer_, data_size_, new_size, alignment_);
bool reallocated = (new_buffer.aligned_pointer != buffer_.aligned_pointer);
buffer_ = new_buffer;
data_size_ = new_size;
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(false);
#endif
return reallocated;
}
void ResizableAlignedBuffer::Release() {
if (buffer_.pointer == nullptr) {
return;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
OnTfLiteArenaDealloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
data_size_);
#endif
AlignedFree(buffer_);
buffer_.pointer = nullptr;
buffer_.aligned_pointer = nullptr;
data_size_ = 0;
}
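// Drops active allocations whose first use comes strictly after `node`.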
void SimpleMemoryArena::PurgeAfter(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].first_node > node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
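// Drops active allocations whose last use comes before `node`; they can no
// longer overlap new requests.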
void SimpleMemoryArena::PurgeActiveAllocs(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].last_node < node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::CalculateActiveAllocs(
const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node) {
active_allocs_.clear();
for (int i = 0; i < allocs.size(); ++i) {
if (allocs[i].first_node <= node && allocs[i].last_node >= node) {
active_allocs_.push_back(allocs[i]);
}
}
std::sort(active_allocs_.begin(), active_allocs_.end());
}
void SimpleMemoryArena::ResetAllocs() { active_allocs_.clear(); }
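// Best-fit search: scans allocations whose usage interval overlaps
// [first_node, last_node] for the tightest aligned gap that can hold `size`
// bytes, falling back to the current end of the arena.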
TfLiteStatus SimpleMemoryArena::Allocate(
TfLiteContext* context, size_t alignment, size_t size, int32_t tensor,
int32_t first_node, int32_t last_node,
ArenaAllocWithUsageInterval* new_alloc) {
TF_LITE_ENSURE(context, alignment <= underlying_buffer_.GetAlignment());
new_alloc->tensor = tensor;
new_alloc->first_node = first_node;
new_alloc->last_node = last_node;
new_alloc->size = size;
if (size == 0) {
new_alloc->offset = 0;
return kTfLiteOk;
}
const size_t kOffsetNotAssigned = std::numeric_limits<size_t>::max();
size_t best_offset = kOffsetNotAssigned;
size_t best_offset_fit = kOffsetNotAssigned;
size_t current_offset = 0;
for (const auto& alloc : active_allocs_) {
if (alloc.last_node < first_node || alloc.first_node > last_node) {
continue;
}
size_t aligned_current_offset = AlignTo(alignment, current_offset);
if (aligned_current_offset + size <= alloc.offset &&
alloc.offset - aligned_current_offset < best_offset_fit) {
best_offset = aligned_current_offset;
best_offset_fit = alloc.offset - current_offset;
}
current_offset = std::max(current_offset, alloc.offset + alloc.size);
if (best_offset_fit == 0) {
break;
}
}
if (best_offset == kOffsetNotAssigned) {
best_offset = AlignTo(alignment, current_offset);
}
high_water_mark_ = std::max(high_water_mark_, best_offset + size);
new_alloc->offset = best_offset;
auto insertion_it = std::upper_bound(active_allocs_.begin(),
active_allocs_.end(), *new_alloc);
active_allocs_.insert(insertion_it, *new_alloc);
return kTfLiteOk;
}
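// Grows the backing buffer to the planned high-water mark; *arena_reallocated
// reports whether the underlying memory moved.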
TfLiteStatus SimpleMemoryArena::Commit(bool* arena_reallocated) {
*arena_reallocated = underlying_buffer_.Resize(high_water_mark_);
committed_ = true;
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ResolveAlloc(
TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc,
char** output_ptr) {
TF_LITE_ENSURE(context, committed_);
TF_LITE_ENSURE(context, output_ptr != nullptr);
TF_LITE_ENSURE(context,
underlying_buffer_.GetSize() >= (alloc.offset + alloc.size));
if (alloc.size == 0) {
*output_ptr = nullptr;
} else {
*output_ptr = underlying_buffer_.GetPtr() + alloc.offset;
}
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ClearPlan() {
committed_ = false;
high_water_mark_ = 0;
active_allocs_.clear();
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ReleaseBuffer() {
committed_ = false;
underlying_buffer_.Release();
return kTfLiteOk;
}
TFLITE_ATTRIBUTE_WEAK void DumpArenaInfo(
const std::string& name, const std::vector<int>& execution_plan,
size_t arena_size, const std::vector<ArenaAllocWithUsageInterval>& allocs) {
}
void SimpleMemoryArena::DumpDebugInfo(
const std::string& name, const std::vector<int>& execution_plan) const {
tflite::DumpArenaInfo(name, execution_plan, underlying_buffer_.GetSize(),
active_allocs_);
}
} | #include "tensorflow/lite/simple_memory_arena.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
void ReportError(TfLiteContext* context, const char* format, ...) {}
TEST(SimpleMemoryArenaTest, BasicArenaOperations) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[6];
arena.Allocate(&context, 32, 2047, 0, 1, 3, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 2, 5, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 3, 6, &allocs[2]);
arena.Allocate(&context, 32, 2047, 3, 5, 6, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 4, 6, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 6, 6, &allocs[5]);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 6144);
EXPECT_EQ(allocs[5].offset, 2048);
}
TEST(SimpleMemoryArenaTest, BasicZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval alloc;
ASSERT_EQ(arena.Allocate(&context, 32, 0, 0, 1, 2, &alloc), kTfLiteOk);
EXPECT_EQ(alloc.offset, 0);
EXPECT_EQ(alloc.size, 0);
char* resolved_ptr = nullptr;
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
EXPECT_EQ(resolved_ptr, nullptr);
}
TEST(SimpleMemoryArenaTest, InterleavedZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[4];
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 0, 0, 4, &allocs[0]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 0, 1, 1, 2, &allocs[1]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 1023, 2, 1, 2, &allocs[2]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 3, 3, 4, &allocs[3]), kTfLiteOk);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 0);
EXPECT_EQ(allocs[2].offset, 2048);
EXPECT_EQ(allocs[3].offset, 2048);
}
TEST(SimpleMemoryArenaTest, TestClearPlan) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 1, 2, &allocs[2]);
bool reallocated = false;
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
arena.ClearPlan();
arena.Allocate(&context, 32, 1023, 3, 0, 2, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 1, 2, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 1, 2, &allocs[5]);
arena.Commit(&reallocated);
ASSERT_FALSE(reallocated);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 1024);
EXPECT_EQ(allocs[5].offset, 2048);
arena.ClearPlan();
arena.Allocate(&context, 32, 4095, 6, 0, 2, &allocs[6]);
arena.Allocate(&context, 32, 4095, 7, 1, 2, &allocs[7]);
arena.Allocate(&context, 32, 4095, 8, 1, 2, &allocs[8]);
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[6].offset, 0);
EXPECT_EQ(allocs[7].offset, 4096);
EXPECT_EQ(allocs[8].offset, 8192);
}
TEST(SimpleMemoryArenaTest, TestPurgeAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[5];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.PurgeActiveAllocs(4);
arena.Allocate(&context, 32, 13, 3,
4, 5, &allocs[4]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[4], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[4].offset, 0);
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[0].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestResetAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.Allocate(&context, 32, 13, 0,
0, 3, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
EXPECT_EQ(resolved_ptr3, resolved_ptr2 + 2048);
arena.ResetAllocs();
arena.Allocate(&context, 32, 13, 0,
0, 2, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[3].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestClearBuffer) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.BasePointer(), 0);
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_NE(arena.BasePointer(), 0);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
class BufferAndPlanClearingTest : public ::testing::Test,
public ::testing::WithParamInterface<bool> {};
TEST_P(BufferAndPlanClearingTest, TestClearBufferAndClearPlan) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
if (GetParam()) {
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
} else {
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
}
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
INSTANTIATE_TEST_SUITE_P(BufferAndPlanClearingTest, BufferAndPlanClearingTest,
::testing::Values(true, false));
}
} |
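// Illustrative lifecycle sketch for the arena API exercised in the tests above.
// The header path (tensorflow/lite/simple_memory_arena.h) and the parameter names
// in the /*...*/ comments are inferred from the call sites, not taken from the row.
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/simple_memory_arena.h"

void NoOpReport(TfLiteContext*, const char*, ...) {}

void ArenaLifecycleSketch() {
  TfLiteContext context;
  context.ReportError = NoOpReport;
  tflite::SimpleMemoryArena arena(/*arena_alignment=*/64);
  tflite::ArenaAllocWithUsageInterval alloc;
  // Plan one 2047-byte region for tensor 0, live between nodes 0 and 2.
  arena.Allocate(&context, /*alignment=*/32, /*size=*/2047, /*tensor=*/0,
                 /*first_node=*/0, /*last_node=*/2, &alloc);
  bool reallocated = false;
  arena.Commit(&reallocated);                 // materializes the underlying buffer
  char* ptr = nullptr;
  arena.ResolveAlloc(&context, alloc, &ptr);  // ptr now points into the arena
  arena.ReleaseBuffer();                      // frees the buffer, keeps the plan
  arena.ClearPlan();                          // drops the allocation plan as well
}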
716 | cpp | tensorflow/tensorflow | stderr_reporter | tensorflow/lite/stderr_reporter.cc | tensorflow/lite/stderr_reporter_test.cc | #ifndef TENSORFLOW_LITE_STDERR_REPORTER_H_
#define TENSORFLOW_LITE_STDERR_REPORTER_H_
#include <cstdarg>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
struct StderrReporter : public ErrorReporter {
int Report(const char* format, va_list args) override;
};
ErrorReporter* DefaultErrorReporter();
}
#endif
#include "tensorflow/lite/stderr_reporter.h"
#include <stdarg.h>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
int StderrReporter::Report(const char* format, va_list args) {
logging_internal::MinimalLogger::LogFormatted(TFLITE_LOG_ERROR, format, args);
return 0;
}
ErrorReporter* DefaultErrorReporter() {
static StderrReporter* error_reporter = new StderrReporter;
return error_reporter;
}
} | #include "tensorflow/lite/stderr_reporter.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/error_reporter.h"
namespace tflite {
namespace {
void CheckWritesToStderr(ErrorReporter *error_reporter) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
testing::internal::CaptureStderr();
#endif
TF_LITE_REPORT_ERROR(error_reporter, "Test: %d", 42);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ("ERROR: Test: 42\n", testing::internal::GetCapturedStderr());
#endif
}
TEST(StderrReporterTest, DefaultErrorReporter_WritesToStderr) {
CheckWritesToStderr(DefaultErrorReporter());
}
TEST(StderrReporterTest, StderrReporter_WritesToStderr) {
StderrReporter stderr_reporter;
CheckWritesToStderr(&stderr_reporter);
}
}
} |
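// Minimal usage sketch for the reporter above; it only uses DefaultErrorReporter()
// and the TF_LITE_REPORT_ERROR macro, both of which appear in the listed sources.
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/stderr_reporter.h"

void ReportLoadFailure(const char* path) {
  tflite::ErrorReporter* reporter = tflite::DefaultErrorReporter();
  TF_LITE_REPORT_ERROR(reporter, "Failed to load model %s", path);
}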
717 | cpp | tensorflow/tensorflow | array | third_party/xla/xla/python/ifrt/array.cc | third_party/xla/xla/python/ifrt/array_test.cc | #ifndef XLA_PYTHON_IFRT_ARRAY_H_
#define XLA_PYTHON_IFRT_ARRAY_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/value.h"
#include "xla/tsl/concurrency/ref_count.h"
namespace xla {
namespace ifrt {
class Client;
using Layout = ::xla::PjRtLayout;
enum class ArrayCopySemantics : int {
kAlwaysCopy = 0,
kReuseInput,
kDonateInput,
};
class Array : public llvm::RTTIExtends<Array, Value> {
public:
Array() = default;
Array(const Array&) = delete;
Array(Array&&) = delete;
Array& operator=(const Array&) = delete;
Array& operator=(Array&&) = delete;
virtual DType dtype() const = 0;
virtual const Shape& shape() const = 0;
virtual const Sharding& sharding() const = 0;
virtual std::shared_ptr<const Sharding> shared_ptr_sharding() const = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtLayout>> layout() const = 0;
virtual absl::StatusOr<std::vector<tsl::RCReference<Array>>>
DisassembleIntoSingleDeviceArrays(ArrayCopySemantics semantics) = 0;
virtual absl::StatusOr<tsl::RCReference<Array>> FullyReplicatedShard(
ArrayCopySemantics semantics) = 0;
ABSL_MUST_USE_RESULT
virtual Future<> CopyToHostBuffer(
void* data, std::optional<absl::Span<const int64_t>> byte_strides,
ArrayCopySemantics semantics) = 0;
static char ID;
};
std::vector<Array*> MakeArrayPointerList(
absl::Span<const tsl::RCReference<Array>> arrays);
}
}
#endif
#include "xla/python/ifrt/array.h"
#include <memory>
#include <vector>
namespace xla {
namespace ifrt {
char Array::ID = 0;
std::vector<Array*> MakeArrayPointerList(
absl::Span<const tsl::RCReference<Array>> arrays) {
std::vector<Array*> result;
result.reserve(arrays.size());
for (const auto& array : arrays) {
result.push_back(array.get());
}
return result;
}
}
} | #include "xla/python/ifrt/array.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/mock.h"
namespace xla {
namespace ifrt {
namespace {
TEST(ArrayTest, MakeArrayPointerListTest) {
const int kNumArrays = 3;
std::vector<tsl::RCReference<Array>> arrays;
arrays.reserve(kNumArrays);
for (int i = 0; i < kNumArrays; ++i) {
arrays.push_back(tsl::MakeRef<MockArray>());
}
std::vector<Array*> array_pointer_list = MakeArrayPointerList(arrays);
ASSERT_THAT(array_pointer_list, testing::SizeIs(kNumArrays));
for (int i = 0; i < kNumArrays; ++i) {
EXPECT_THAT(array_pointer_list[i], arrays[i].get());
}
}
}
}
} |
718 | cpp | tensorflow/tensorflow | graph_info | tensorflow/lite/graph_info.cc | tensorflow/lite/graph_info_test.cc | #ifndef TENSORFLOW_LITE_GRAPH_INFO_H_
#define TENSORFLOW_LITE_GRAPH_INFO_H_
#include <stddef.h>
#include <cstdint>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
class GraphInfo {
public:
virtual ~GraphInfo() {}
virtual size_t num_tensors() const = 0;
virtual TfLiteTensor* tensor(size_t index) = 0;
virtual TfLiteTensor* tensors() = 0;
virtual size_t num_execution_nodes() const = 0;
virtual size_t num_total_nodes() const = 0;
virtual const TfLiteNode& node(size_t index) const = 0;
virtual const TfLiteRegistration& registration(size_t index) const = 0;
virtual size_t node_index(size_t index) const = 0;
virtual const std::vector<int>& inputs() const = 0;
virtual const std::vector<int>& outputs() const = 0;
virtual const std::vector<int>& variables() const = 0;
};
struct NodeSubset {
enum Type {
kTfUnexplored = 0,
kTfPartition,
kTfNonPartition
};
Type type = kTfUnexplored;
std::vector<int> nodes;
std::vector<int> input_tensors;
std::vector<int> output_tensors;
};
using ControlEdge = std::pair<int32_t, int32_t>;
using ControlEdges = std::vector<ControlEdge>;
TfLiteStatus PartitionGraphIntoIndependentNodeSubsets(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges* control_edges = nullptr);
}
#endif
#include "tensorflow/lite/graph_info.h"
#include <algorithm>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
template <class T>
void Uniquefy(std::vector<T>* items) {
std::sort(items->begin(), items->end());
items->erase(std::unique(items->begin(), items->end()), items->end());
}
class PartitionGraphIntoIndependentNodeSubsetsImpl {
public:
PartitionGraphIntoIndependentNodeSubsetsImpl(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges& control_edges)
: info_(info),
node_subsets_(node_subsets),
node_type_(info_->num_total_nodes(), NodeSubset::kTfNonPartition),
greedily_(greedily),
control_edges_(control_edges),
num_incoming_control_edges_(info_->num_execution_nodes(), 0) {
for (auto node_index : TfLiteIntArrayView(nodes_to_partition)) {
node_type_[node_index] = NodeSubset::kTfPartition;
}
Uniquefy(&control_edges_);
}
void Partition() {
node_subsets_->clear();
tensor_epochs_.clear();
tensor_epochs_.resize(info_->num_tensors(), kEpochAlwaysReady);
node_epochs_.clear();
node_epochs_.resize(info_->num_execution_nodes(), kEpochNotReady);
num_incoming_control_edges_.clear();
num_incoming_control_edges_.resize(info_->num_execution_nodes(), 0);
for (const auto& edge : control_edges_) {
++num_incoming_control_edges_[edge.second];
}
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
const TfLiteNode& node = info_->node(node_index);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = kEpochNotReady;
}
}
while (true) {
BuildNodeSubset();
if (node_subsets_->back().nodes.empty()) {
node_subsets_->pop_back();
break;
}
}
for (int output_index : info_->outputs()) {
int output_epoch = tensor_epochs_[output_index];
if (output_epoch == kEpochAlwaysReady) {
continue;
}
NodeSubset& output_subset = (*node_subsets_)[output_epoch];
output_subset.output_tensors.push_back(output_index);
}
for (NodeSubset& node_subset : *node_subsets_) {
Uniquefy(&node_subset.input_tensors);
Uniquefy(&node_subset.output_tensors);
}
}
private:
enum {
kEpochNotReady = -1,
kEpochAlwaysReady = -2
};
bool UpdateNode(int node_index) {
const TfLiteNode& node = info_->node(node_index);
NodeSubset& current_subset = node_subsets_->back();
int current_epoch = node_subsets_->size() - 1;
if (node_epochs_[node_index] != kEpochNotReady) {
return false;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index != kTfLiteOptionalTensor &&
tensor_epochs_[input_tensor_index] == kEpochNotReady) {
return false;
}
}
if (num_incoming_control_edges_[node_index] != 0) {
return false;
}
int original_node_idx = info_->node_index(node_index);
if (current_subset.type == NodeSubset::kTfUnexplored) {
current_subset.type = node_type_[original_node_idx];
}
if (current_subset.type == node_type_[original_node_idx]) {
node_epochs_[node_index] = current_epoch;
current_subset.nodes.push_back(original_node_idx);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = current_epoch;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index == kTfLiteOptionalTensor) {
continue;
}
int input_epoch = tensor_epochs_[input_tensor_index];
int node_epoch = current_epoch;
if (input_epoch != node_epoch) {
current_subset.input_tensors.push_back(input_tensor_index);
if (input_epoch >= 0) {
NodeSubset& input_subset = (*node_subsets_)[input_epoch];
input_subset.output_tensors.push_back(input_tensor_index);
}
}
}
for (auto edge_iter =
std::lower_bound(control_edges_.begin(), control_edges_.end(),
ControlEdge(node_index, 0));
edge_iter != control_edges_.end() && edge_iter->first == node_index;
++edge_iter) {
--num_incoming_control_edges_[edge_iter->second];
}
return true;
} else {
return false;
}
}
void BuildNodeSubset() {
node_subsets_->emplace_back(NodeSubset());
while (true) {
bool did_something = false;
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
if (UpdateNode(node_index)) {
did_something = true;
} else {
if (did_something && !greedily_) {
return;
}
}
}
if (!did_something) return;
}
}
const GraphInfo* info_;
std::vector<NodeSubset>* node_subsets_;
std::vector<NodeSubset::Type> node_type_;
std::vector<int> tensor_epochs_;
std::vector<int> node_epochs_;
const bool greedily_;
ControlEdges control_edges_;
std::vector<int> num_incoming_control_edges_;
};
}
TfLiteStatus PartitionGraphIntoIndependentNodeSubsets(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges* control_edges) {
ControlEdges my_control_edges;
if (control_edges == nullptr) {
control_edges = &my_control_edges;
if (greedily) {
for (int last_op_with_side_effect = -1, node_index = 0;
node_index < info->num_execution_nodes(); ++node_index) {
const auto& node = info->node(node_index);
if (node.might_have_side_effect) {
if (last_op_with_side_effect != -1) {
my_control_edges.emplace_back(last_op_with_side_effect, node_index);
}
last_op_with_side_effect = node_index;
}
}
}
}
PartitionGraphIntoIndependentNodeSubsetsImpl(
info, nodes_to_partition, node_subsets, greedily, *control_edges)
.Partition();
return kTfLiteOk;
}
} | #include "tensorflow/lite/graph_info.h"
#include <stddef.h>
#include <algorithm>
#include <memory>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::Pointwise;
using NodeSubsets = std::vector<NodeSubset>;
TfLiteIntArray* ConvertVector(const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
}
class SimpleTestGraph : public GraphInfo {
public:
SimpleTestGraph(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<std::tuple<std::vector<int>, std::vector<int>, bool>>&
nodes,
int node_index_offset = 0)
: inputs_(inputs),
outputs_(outputs),
node_index_offset_(node_index_offset) {
NeedsTensors(inputs_);
NeedsTensors(outputs_);
for (int i = 0; i < node_index_offset; ++i) AddNode({}, {}, false);
for (const auto& [inputs, outputs, might_have_side_effect] : nodes) {
AddNode(inputs, outputs, might_have_side_effect);
}
registrations_.resize(nodes.size());
}
~SimpleTestGraph() override {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
}
}
size_t num_total_nodes() const override { return nodes_.size(); }
size_t num_execution_nodes() const override {
return nodes_.size() - node_index_offset_;
}
const TfLiteNode& node(size_t index) const override {
return nodes_[index + node_index_offset_];
}
size_t node_index(size_t index) const override {
return index + node_index_offset_;
}
size_t num_tensors() const override { return tensors_.size(); }
const TfLiteRegistration& registration(size_t index) const override {
return registrations_[index + node_index_offset_];
}
TfLiteTensor* tensor(size_t index) override { return &tensors_[index]; }
TfLiteTensor* tensors() override { return tensors_.data(); }
const std::vector<int>& inputs() const override { return inputs_; }
const std::vector<int>& outputs() const override { return outputs_; }
const std::vector<int>& variables() const override { return variables_; }
private:
void AddNode(const std::vector<int>& inputs, const std::vector<int>& outputs,
bool might_have_side_effect) {
NeedsTensors(inputs);
NeedsTensors(outputs);
nodes_.push_back(TfLiteNode());
TfLiteNode& node = nodes_.back();
node.inputs = ConvertVector(inputs);
node.outputs = ConvertVector(outputs);
node.might_have_side_effect = might_have_side_effect;
}
void NeedsTensors(const std::vector<int>& tensors) {
for (const int tensor : tensors)
tensors_.resize(std::max<int>(tensor + 1, tensors_.size()));
}
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
std::vector<TfLiteRegistration> registrations_;
size_t node_index_offset_;
};
void PartitionGraphOrDie(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
NodeSubsets* subgraphs, const bool greedily,
const ControlEdges* control_edges) {
TfLiteIntArray* nodes_to_partition_int_array =
ConvertVector(nodes_to_partition);
ASSERT_EQ(PartitionGraphIntoIndependentNodeSubsets(
&graph, nodes_to_partition_int_array, subgraphs, greedily,
control_edges),
kTfLiteOk);
TfLiteIntArrayFree(nodes_to_partition_int_array);
}
NodeSubsets PartitionGraph(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
const bool greedily = true,
const ControlEdges* control_edges = nullptr) {
NodeSubsets subgraphs;
PartitionGraphOrDie(graph, nodes_to_partition, &subgraphs, greedily,
control_edges);
return subgraphs;
}
MATCHER(EqNodeSubset, "") {
const NodeSubset& a = std::get<0>(arg);
const NodeSubset& b = std::get<1>(arg);
if (a.type != b.type) {
*result_listener << "mismatched .type ";
return ExplainMatchResult(Eq(b.type), a.type, result_listener);
}
if (a.nodes != b.nodes) {
*result_listener << "mismatched .nodes ";
return ExplainMatchResult(Pointwise(Eq(), b.nodes), a.nodes,
result_listener);
}
if (a.input_tensors != b.input_tensors) {
*result_listener << "mismatched .input_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.input_tensors), a.input_tensors,
result_listener);
}
if (a.output_tensors != b.output_tensors) {
*result_listener << "mismatched .output_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.output_tensors),
a.output_tensors, result_listener);
}
return true;
}
TEST(PartitionTest, Nodes0PartitionNodes0) {
EXPECT_THAT(PartitionGraph({
{},
{},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes0PartitionNodes0Tensors1) {
EXPECT_THAT(PartitionGraph({
{0},
{0},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes1PartitionNodes0) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
node_index_offset,
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0Inputs0) {
EXPECT_THAT(
PartitionGraph({
{},
{0},
{
{{}, {0}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{},
{0},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
node_index_offset},
{node_index_offset + 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset + 0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{node_index_offset + 1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{0, 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1, 2}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1, 2},
{3},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2Greedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 2},
{0},
{1, 3},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2ClusteredNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1},
{3},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithControlDependency) {
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, true},
{{1}, {2}, true},
{{2}, {3}, false},
{{1, 3}, {}, true},
{{1}, {4}, true},
},
},
{0, 1, 3, 4}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithExternalControlDependency) {
const ControlEdges control_edges = {
{0, 1},
{1, 3},
{3, 4},
};
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{2}, {3}, false},
{{1, 3}, {}, false},
{{1}, {4}, false},
},
},
{0, 1, 3, 4},
true, &control_edges),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, ComplexGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1, 4, 5},
{0},
{2, 7},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
})));
}
TEST(PartitionTest, ComplexNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2, 5},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
{
NodeSubset::kTfPartition,
{4, 5},
{5},
{7},
},
})));
}
}
} |
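// Partitioning sketch mirroring the PartitionGraphOrDie helper above. It assumes
// some GraphInfo implementation is available (e.g. the SimpleTestGraph fixture);
// the chosen node indices {0, 2} are purely illustrative.
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"

std::vector<tflite::NodeSubset> PartitionSketch(const tflite::GraphInfo& graph) {
  TfLiteIntArray* to_partition = TfLiteIntArrayCreate(2);
  to_partition->data[0] = 0;  // nodes handed to a delegate
  to_partition->data[1] = 2;
  std::vector<tflite::NodeSubset> subsets;
  tflite::PartitionGraphIntoIndependentNodeSubsets(&graph, to_partition, &subsets,
                                                   /*greedily=*/true);
  TfLiteIntArrayFree(to_partition);
  return subsets;
}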
719 | cpp | tensorflow/tensorflow | string_util | tensorflow/lite/testing/string_util.cc | tensorflow/lite/string_util_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_STRING_UTIL_H_
#define TENSORFLOW_CORE_KERNELS_STRING_UTIL_H_
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
enum class UnicodeEncoding { UTF8, UTF16BE, UTF32BE };
enum class CharUnit { BYTE, UTF8_CHAR };
inline bool IsTrailByte(char x) { return static_cast<signed char>(x) < -0x40; }
Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding);
Status ParseCharUnit(const string& str, CharUnit* unit);
int32 UTF8StrLen(const string& str);
template <typename T>
bool ForwardNUTF8CharPositions(const StringPiece in,
const T num_utf8_chars_to_shift, T* pos) {
const size_t size = in.size();
T utf8_chars_counted = 0;
while (utf8_chars_counted < num_utf8_chars_to_shift && *pos < size) {
do {
++*pos;
} while (*pos < size && IsTrailByte(in[*pos]));
++utf8_chars_counted;
}
return utf8_chars_counted == num_utf8_chars_to_shift;
}
template <typename T>
bool BackNUTF8CharPositions(const StringPiece in,
const T num_utf8_chars_to_shift, T* pos) {
const size_t start = 0;
T utf8_chars_counted = 0;
while (utf8_chars_counted < num_utf8_chars_to_shift && (*pos > start)) {
do {
--*pos;
} while (IsTrailByte(in[*pos]) && *pos > start);
++utf8_chars_counted;
}
return utf8_chars_counted == num_utf8_chars_to_shift;
}
}
#endif
#include "tensorflow/core/kernels/string_util.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding) {
if (str == "UTF-8") {
*encoding = UnicodeEncoding::UTF8;
} else if (str == "UTF-16-BE") {
*encoding = UnicodeEncoding::UTF16BE;
} else if (str == "UTF-32-BE") {
*encoding = UnicodeEncoding::UTF32BE;
} else {
return errors::InvalidArgument(
strings::StrCat("Invalid encoding \"", str,
"\": Should be one of: UTF-8, UTF-16-BE, UTF-32-BE"));
}
return absl::OkStatus();
}
Status ParseCharUnit(const string& str, CharUnit* unit) {
if (str == "BYTE") {
*unit = CharUnit::BYTE;
} else if (str == "UTF8_CHAR") {
*unit = CharUnit::UTF8_CHAR;
} else {
return errors::InvalidArgument(strings::StrCat(
"Invalid unit \"", str, "\": Should be one of: BYTE, UTF8_CHAR"));
}
return absl::OkStatus();
}
int32 UTF8StrLen(const string& str) {
const int32_t byte_size = str.size();
const char* const end = str.data() + byte_size;
const char* ptr = str.data();
int32_t skipped_count = 0;
while (ptr < end) {
skipped_count += IsTrailByte(*ptr++) ? 1 : 0;
}
const int32_t result = byte_size - skipped_count;
return result;
}
} | #include "tensorflow/lite/string_util.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
TEST(StringUtil, TestStringUtil) {
Interpreter interpreter;
interpreter.AddTensors(3);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
TfLiteTensor* t1 = interpreter.tensor(1);
t1->type = kTfLiteString;
t1->allocation_type = kTfLiteDynamic;
union {
char raw_bytes[15];
struct {
int32_t num_strs;
int32_t offsets[2];
char str_data[3];
} tensor_data;
} data;
data.tensor_data = {1, {12, 15}, {'X', 'Y', 'Z'}};
TfLiteQuantization quant;
quant.type = kTfLiteNoQuantization;
quant.params = nullptr;
interpreter.SetTensorParametersReadOnly(
2, kTfLiteString, "", {1}, quant, data.raw_bytes, sizeof(data.raw_bytes));
TfLiteTensor* t2 = interpreter.tensor(2);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
char s0[] = "ABC";
string s1 = "DEFG";
char s2[] = "";
DynamicBuffer buf0;
ASSERT_EQ(buf0.AddString(s0, 3), kTfLiteOk);
DynamicBuffer buf1;
ASSERT_EQ(buf1.AddString(s1.data(), s1.length()), kTfLiteOk);
ASSERT_EQ(buf0.AddString(s2, 0), kTfLiteOk);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 2;
new_shape->data[1] = 1;
buf0.WriteToTensor(t0, new_shape);
buf1.WriteToTensorAsVector(t1);
EXPECT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
EXPECT_EQ(t1->dims->size, 1);
EXPECT_EQ(t1->dims->data[0], 1);
ASSERT_EQ(GetStringCount(t0), 2);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "ABC");
str_ref = GetString(t0, 1);
ASSERT_EQ(string(str_ref.str, str_ref.len), "");
ASSERT_EQ(t0->bytes, 19);
ASSERT_EQ(GetStringCount(t1), 1);
str_ref = GetString(t1, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "DEFG");
ASSERT_EQ(t1->bytes, 16);
ASSERT_EQ(GetStringCount(t2), 1);
str_ref = GetString(t2, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "XYZ");
ASSERT_EQ(t2->bytes, 15);
}
TEST(StringUtil, AddStringOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string(max_size + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, AddStringToFullBufferOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string((max_size / 2) + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteOk);
EXPECT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, TruncatesCharDataToLen) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
char fake_big[] = "ABCADASDA";
ASSERT_EQ(buf.AddString({fake_big, 3}), kTfLiteOk);
buf.WriteToTensorAsVector(t0);
StringRef added_string = GetString(t0, 0);
EXPECT_EQ(added_string.len, 3);
EXPECT_EQ(string(added_string.str, 3), "ABC");
}
TEST(StringUtil, TestAddJoinedStringCharSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, ' ');
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " ABC DEFG XYZ");
ASSERT_EQ(t0->bytes, 26);
}
TEST(StringUtil, TestAddJoinedStringStringRefSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s[] = " - ";
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, {s, 3});
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " - ABC - DEFG - - XYZ");
ASSERT_EQ(t0->bytes, 34);
}
TEST(StringUtil, TestEmptyList) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 0);
ASSERT_EQ(t0->bytes, 8);
}
TEST(StringUtil, TestShapes) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
t0->dims = TfLiteIntArrayCreate(2);
t0->dims->data[0] = 2;
t0->dims->data[1] = 1;
DynamicBuffer buf;
buf.AddString("ABC", 3);
buf.AddString("X", 1);
buf.WriteToTensor(t0, nullptr);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 1;
new_shape->data[1] = 2;
buf.WriteToTensor(t0, new_shape);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 1);
EXPECT_EQ(t0->dims->data[1], 2);
}
} |
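// Usage sketch for the TF Lite string packing helpers exercised by the unit test
// above (DynamicBuffer / GetString from tensorflow/lite/string_util.h). The caller
// is assumed to pass a kTfLiteString tensor with kTfLiteDynamic allocation, as the
// test does.
#include <string>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/string_util.h"

std::string FirstString(TfLiteTensor* string_tensor) {
  tflite::DynamicBuffer buf;
  buf.AddString("ABC", 3);                   // pack one entry
  buf.WriteToTensorAsVector(string_tensor);  // tensor becomes a 1-D string tensor
  tflite::StringRef ref = tflite::GetString(string_tensor, 0);
  return std::string(ref.str, ref.len);
}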
720 | cpp | tensorflow/tensorflow | minimal_logging | tensorflow/lite/minimal_logging.cc | tensorflow/lite/minimal_logging_test.cc | #ifndef TENSORFLOW_LITE_MINIMAL_LOGGING_H_
#define TENSORFLOW_LITE_MINIMAL_LOGGING_H_
#include <cstdarg>
#include "tensorflow/lite/logger.h"
namespace tflite {
namespace logging_internal {
using tflite::LogSeverity;
class MinimalLogger {
public:
static void Log(LogSeverity severity, const char* format, ...);
static void LogFormatted(LogSeverity severity, const char* format,
va_list args);
static LogSeverity GetMinimumLogSeverity();
static LogSeverity SetMinimumLogSeverity(LogSeverity new_severity);
private:
static const char* GetSeverityName(LogSeverity severity);
static LogSeverity minimum_log_severity_;
};
}
}
#define TFLITE_LOG_PROD(severity, format, ...) \
if (severity >= \
::tflite::logging_internal::MinimalLogger::GetMinimumLogSeverity()) { \
::tflite::logging_internal::MinimalLogger::Log(severity, format, \
##__VA_ARGS__); \
}
#define TFLITE_LOG_PROD_ONCE(severity, format, ...) \
do { \
static const bool s_logged = [&] { \
TFLITE_LOG_PROD(severity, format, ##__VA_ARGS__) \
return true; \
}(); \
(void)s_logged; \
} while (false);
#ifndef NDEBUG
#define TFLITE_LOG TFLITE_LOG_PROD
#define TFLITE_LOG_ONCE TFLITE_LOG_PROD_ONCE
#else
#define TFLITE_LOG(severity, format, ...) \
while (false) { \
TFLITE_LOG_PROD(severity, format, ##__VA_ARGS__); \
}
#define TFLITE_LOG_ONCE TFLITE_LOG
#endif
#endif
#include "tensorflow/lite/minimal_logging.h"
#include <cstdarg>
namespace tflite {
namespace logging_internal {
void MinimalLogger::Log(LogSeverity severity, const char* format, ...) {
va_list args;
va_start(args, format);
LogFormatted(severity, format, args);
va_end(args);
}
const char* MinimalLogger::GetSeverityName(LogSeverity severity) {
switch (severity) {
case TFLITE_LOG_VERBOSE:
return "VERBOSE";
case TFLITE_LOG_INFO:
return "INFO";
case TFLITE_LOG_WARNING:
return "WARNING";
case TFLITE_LOG_ERROR:
return "ERROR";
case TFLITE_LOG_SILENT:
return "SILENT";
}
return "<Unknown severity>";
}
LogSeverity MinimalLogger::GetMinimumLogSeverity() {
return MinimalLogger::minimum_log_severity_;
}
LogSeverity MinimalLogger::SetMinimumLogSeverity(LogSeverity new_severity) {
LogSeverity old_severity = MinimalLogger::minimum_log_severity_;
MinimalLogger::minimum_log_severity_ = new_severity;
return old_severity;
}
}
} | #include "tensorflow/lite/minimal_logging.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/logger.h"
namespace tflite {
TEST(MinimalLogging, Basic) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo");
EXPECT_EQ("INFO: Foo\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, BasicFormatted) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo %s %s", "Bar", "Baz");
EXPECT_EQ("INFO: Foo Bar Baz\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Warn) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "One", "");
EXPECT_EQ("WARNING: One\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Error) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Two");
EXPECT_EQ("ERROR: Two\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, UnknownSeverity) {
testing::internal::CaptureStderr();
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
static_cast<LogSeverity>(-1)),
default_log_severity);
TFLITE_LOG_PROD(static_cast<LogSeverity>(-1), "Three");
EXPECT_EQ("<Unknown severity>: Three\n",
testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, MinimumSeverity) {
testing::internal::CaptureStderr();
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
TFLITE_LOG_WARNING),
default_log_severity);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Foo");
TFLITE_LOG_PROD(default_log_severity, "Bar");
EXPECT_EQ("WARNING: Foo\n", testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, Once) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Debug) {
testing::internal::CaptureStderr();
TFLITE_LOG(TFLITE_LOG_INFO, "Foo");
TFLITE_LOG(TFLITE_LOG_WARNING, "Bar");
TFLITE_LOG(TFLITE_LOG_ERROR, "Baz");
#ifndef NDEBUG
EXPECT_EQ("INFO: Foo\nWARNING: Bar\nERROR: Baz\n",
testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
TEST(MinimalLogging, DebugOnce) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
#ifndef NDEBUG
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
} |
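// Logging sketch; TFLITE_LOG_PROD / TFLITE_LOG_PROD_ONCE are the macros defined in
// the header above, and the severity filter is adjusted the same way the tests do.
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"

void LogSketch(int attempt) {
  tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(TFLITE_LOG_WARNING);
  TFLITE_LOG_PROD(TFLITE_LOG_INFO, "attempt %d", attempt);  // below threshold, dropped
  TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "attempt %d failed", attempt);
  TFLITE_LOG_PROD_ONCE(TFLITE_LOG_ERROR, "reported only once");
}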
721 | cpp | tensorflow/tensorflow | label_image | tensorflow/lite/examples/label_image/label_image.cc | tensorflow/lite/examples/label_image/label_image_test.cc | #ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_LABEL_IMAGE_H_
#define TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_LABEL_IMAGE_H_
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace label_image {
struct Settings {
bool verbose = false;
bool accel = false;
TfLiteType input_type = kTfLiteFloat32;
bool profiling = false;
bool allow_fp16 = false;
bool gl_backend = false;
bool hexagon_delegate = false;
bool xnnpack_delegate = false;
int loop_count = 1;
float input_mean = 127.5f;
float input_std = 127.5f;
string model_name = "./mobilenet_quant_v1_224.tflite";
tflite::FlatBufferModel* model;
string input_bmp_name = "./grace_hopper.bmp";
string labels_file_name = "./labels.txt";
int number_of_threads = 4;
int number_of_results = 5;
int max_profiling_buffer_entries = 1024;
int number_of_warmup_runs = 2;
};
}
}
#endif
#include "tensorflow/lite/examples/label_image/label_image.h"
#include <fcntl.h>
#include <getopt.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
#include "tensorflow/lite/examples/label_image/log.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace label_image {
double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr;
using ProvidedDelegateList = tflite::tools::ProvidedDelegateList;
class DelegateProviders {
public:
DelegateProviders() : delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
delegate_list_util_.AppendCmdlineFlags(flags_);
params_.RemoveParam("help");
delegate_list_util_.RemoveCmdlineFlag(flags_, "help");
}
bool InitFromCmdlineArgs(int* argc, const char** argv) {
return Flags::Parse(argc, argv, flags_);
}
void MergeSettingsIntoParams(const Settings& s) {
if (s.gl_backend) {
if (!params_.HasParam("use_gpu")) {
LOG(WARN) << "GPU delegate execution provider isn't linked or GPU "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_gpu", true);
if (params_.HasParam("gpu_inference_for_sustained_speed")) {
params_.Set<bool>("gpu_inference_for_sustained_speed", true);
}
params_.Set<bool>("gpu_precision_loss_allowed", s.allow_fp16);
}
}
if (s.accel) {
if (!params_.HasParam("use_nnapi")) {
LOG(WARN) << "NNAPI delegate execution provider isn't linked or NNAPI "
"delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_nnapi", true);
params_.Set<bool>("nnapi_allow_fp16", s.allow_fp16);
}
}
if (s.hexagon_delegate) {
if (!params_.HasParam("use_hexagon")) {
LOG(WARN) << "Hexagon delegate execution provider isn't linked or "
"Hexagon delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_hexagon", true);
params_.Set<bool>("hexagon_profiling", s.profiling);
}
}
if (s.xnnpack_delegate) {
if (!params_.HasParam("use_xnnpack")) {
LOG(WARN) << "XNNPACK delegate execution provider isn't linked or "
"XNNPACK delegate isn't supported on the platform!";
} else {
params_.Set<bool>("use_xnnpack", true);
params_.Set<int32_t>("num_threads", s.number_of_threads);
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates()
const {
return delegate_list_util_.CreateAllRankedDelegates();
}
std::string GetHelpMessage(const std::string& cmdline) const {
return Flags::Usage(cmdline, flags_);
}
private:
tflite::tools::ToolParams params_;
ProvidedDelegateList delegate_list_util_;
std::vector<tflite::Flag> flags_;
};
TfLiteStatus ReadLabelsFile(const string& file_name,
std::vector<string>* result,
size_t* found_label_count) {
std::ifstream file(file_name);
if (!file) {
LOG(ERROR) << "Labels file " << file_name << " not found";
return kTfLiteError;
}
result->clear();
string line;
while (std::getline(file, line)) {
result->push_back(line);
}
*found_label_count = result->size();
const int padding = 16;
while (result->size() % padding) {
result->emplace_back();
}
return kTfLiteOk;
}
void PrintProfilingInfo(const profiling::ProfileEvent* e,
uint32_t subgraph_index, uint32_t op_index,
TfLiteRegistration registration) {
LOG(INFO) << std::fixed << std::setw(10) << std::setprecision(3)
<< (e->elapsed_time) / 1000.0 << ", Subgraph " << std::setw(3)
<< std::setprecision(3) << subgraph_index << ", Node "
<< std::setw(3) << std::setprecision(3) << op_index << ", OpCode "
<< std::setw(3) << std::setprecision(3) << registration.builtin_code
<< ", "
<< EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration.builtin_code));
}
void RunInference(Settings* settings,
const DelegateProviders& delegate_providers) {
if (settings->model_name.empty()) {
LOG(ERROR) << "no model file name";
exit(-1);
}
std::unique_ptr<tflite::FlatBufferModel> model;
std::unique_ptr<tflite::Interpreter> interpreter;
model = tflite::FlatBufferModel::BuildFromFile(settings->model_name.c_str());
if (!model) {
LOG(ERROR) << "Failed to mmap model " << settings->model_name;
exit(-1);
}
settings->model = model.get();
LOG(INFO) << "Loaded model " << settings->model_name;
model->error_reporter();
LOG(INFO) << "resolved reporter";
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
if (!interpreter) {
LOG(ERROR) << "Failed to construct interpreter";
exit(-1);
}
interpreter->SetAllowFp16PrecisionForFp32(settings->allow_fp16);
if (settings->verbose) {
LOG(INFO) << "tensors size: " << interpreter->tensors_size();
LOG(INFO) << "nodes size: " << interpreter->nodes_size();
LOG(INFO) << "inputs: " << interpreter->inputs().size();
LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0);
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; i++) {
if (interpreter->tensor(i)->name)
LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", "
<< interpreter->tensor(i)->bytes << ", "
<< interpreter->tensor(i)->type << ", "
<< interpreter->tensor(i)->params.scale << ", "
<< interpreter->tensor(i)->params.zero_point;
}
}
if (settings->number_of_threads != -1) {
interpreter->SetNumThreads(settings->number_of_threads);
}
int image_width = 224;
int image_height = 224;
int image_channels = 3;
std::vector<uint8_t> in = read_bmp(settings->input_bmp_name, &image_width,
&image_height, &image_channels, settings);
int input = interpreter->inputs()[0];
if (settings->verbose) LOG(INFO) << "input: " << input;
const std::vector<int> inputs = interpreter->inputs();
const std::vector<int> outputs = interpreter->outputs();
if (settings->verbose) {
LOG(INFO) << "number of inputs: " << inputs.size();
LOG(INFO) << "number of outputs: " << outputs.size();
}
auto profiler = std::make_unique<profiling::Profiler>(
settings->max_profiling_buffer_entries);
interpreter->SetProfiler(profiler.get());
auto delegates = delegate_providers.CreateAllDelegates();
for (auto& delegate : delegates) {
const auto delegate_name = delegate.provider->GetName();
if (interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) !=
kTfLiteOk) {
LOG(ERROR) << "Failed to apply " << delegate_name << " delegate.";
exit(-1);
} else {
LOG(INFO) << "Applied " << delegate_name << " delegate.";
}
}
if (interpreter->AllocateTensors() != kTfLiteOk) {
LOG(ERROR) << "Failed to allocate tensors!";
exit(-1);
}
if (settings->verbose) PrintInterpreterState(interpreter.get());
TfLiteIntArray* dims = interpreter->tensor(input)->dims;
int wanted_height = dims->data[1];
int wanted_width = dims->data[2];
int wanted_channels = dims->data[3];
settings->input_type = interpreter->tensor(input)->type;
switch (settings->input_type) {
case kTfLiteFloat32:
resize<float>(interpreter->typed_tensor<float>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteInt8:
resize<int8_t>(interpreter->typed_tensor<int8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
case kTfLiteUInt8:
resize<uint8_t>(interpreter->typed_tensor<uint8_t>(input), in.data(),
image_height, image_width, image_channels, wanted_height,
wanted_width, wanted_channels, settings);
break;
default:
LOG(ERROR) << "cannot handle input type "
<< interpreter->tensor(input)->type << " yet";
exit(-1);
}
if (settings->profiling) profiler->StartProfiling();
for (int i = 0; i < settings->number_of_warmup_runs; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
struct timeval start_time, stop_time;
gettimeofday(&start_time, nullptr);
for (int i = 0; i < settings->loop_count; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
LOG(ERROR) << "Failed to invoke tflite!";
exit(-1);
}
}
gettimeofday(&stop_time, nullptr);
LOG(INFO) << "invoked";
LOG(INFO) << "average time: "
<< (get_us(stop_time) - get_us(start_time)) /
(settings->loop_count * 1000)
<< " ms";
if (settings->profiling) {
profiler->StopProfiling();
auto profile_events = profiler->GetProfileEvents();
for (int i = 0; i < profile_events.size(); i++) {
auto subgraph_index = profile_events[i]->extra_event_metadata;
auto op_index = profile_events[i]->event_metadata;
const auto subgraph = interpreter->subgraph(subgraph_index);
const auto node_and_registration =
subgraph->node_and_registration(op_index);
const TfLiteRegistration registration = node_and_registration->second;
PrintProfilingInfo(profile_events[i], subgraph_index, op_index,
registration);
}
}
const float threshold = 0.001f;
std::vector<std::pair<float, int>> top_results;
int output = interpreter->outputs()[0];
TfLiteIntArray* output_dims = interpreter->tensor(output)->dims;
auto output_size = output_dims->data[output_dims->size - 1];
switch (interpreter->tensor(output)->type) {
case kTfLiteFloat32:
get_top_n<float>(interpreter->typed_output_tensor<float>(0), output_size,
settings->number_of_results, threshold, &top_results,
settings->input_type);
break;
case kTfLiteInt8:
get_top_n<int8_t>(interpreter->typed_output_tensor<int8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
case kTfLiteUInt8:
get_top_n<uint8_t>(interpreter->typed_output_tensor<uint8_t>(0),
output_size, settings->number_of_results, threshold,
&top_results, settings->input_type);
break;
default:
LOG(ERROR) << "cannot handle output type "
<< interpreter->tensor(output)->type << " yet";
exit(-1);
}
std::vector<string> labels;
size_t label_count;
if (ReadLabelsFile(settings->labels_file_name, &labels, &label_count) !=
kTfLiteOk)
exit(-1);
for (const auto& result : top_results) {
const float confidence = result.first;
const int index = result.second;
LOG(INFO) << confidence << ": " << index << " " << labels[index];
}
interpreter.reset();
}
void display_usage(const DelegateProviders& delegate_providers) {
LOG(INFO)
<< "\n"
<< delegate_providers.GetHelpMessage("label_image")
<< "\t--accelerated, -a: [0|1] use Android NNAPI or not\n"
<< "\t--allow_fp16, -f: [0|1], allow running fp32 models with fp16 or "
"not\n"
<< "\t--count, -c: loop interpreter->Invoke() for certain times\n"
<< "\t--gl_backend, -g: [0|1]: use GL GPU Delegate on Android\n"
<< "\t--hexagon_delegate, -j: [0|1]: use Hexagon Delegate on Android\n"
<< "\t--input_mean, -b: input mean\n"
<< "\t--input_std, -s: input standard deviation\n"
<< "\t--image, -i: image_name.bmp\n"
<< "\t--labels, -l: labels for the model\n"
<< "\t--tflite_model, -m: model_name.tflite\n"
<< "\t--profiling, -p: [0|1], profiling or not\n"
<< "\t--num_results, -r: number of results to show\n"
<< "\t--threads, -t: number of threads\n"
<< "\t--verbose, -v: [0|1] print more information\n"
<< "\t--warmup_runs, -w: number of warmup runs\n"
<< "\t--xnnpack_delegate, -x [0:1]: xnnpack delegate\n"
<< "\t--help, -h: Print this help message\n";
}
int Main(int argc, char** argv) {
DelegateProviders delegate_providers;
bool parse_result = delegate_providers.InitFromCmdlineArgs(
&argc, const_cast<const char**>(argv));
if (!parse_result) {
display_usage(delegate_providers);
return EXIT_FAILURE;
}
Settings s;
int c;
while (true) {
static struct option long_options[] = {
{"accelerated", required_argument, nullptr, 'a'},
{"allow_fp16", required_argument, nullptr, 'f'},
{"count", required_argument, nullptr, 'c'},
{"verbose", required_argument, nullptr, 'v'},
{"image", required_argument, nullptr, 'i'},
{"labels", required_argument, nullptr, 'l'},
{"tflite_model", required_argument, nullptr, 'm'},
{"profiling", required_argument, nullptr, 'p'},
{"threads", required_argument, nullptr, 't'},
{"input_mean", required_argument, nullptr, 'b'},
{"input_std", required_argument, nullptr, 's'},
{"num_results", required_argument, nullptr, 'r'},
{"max_profiling_buffer_entries", required_argument, nullptr, 'e'},
{"warmup_runs", required_argument, nullptr, 'w'},
{"gl_backend", required_argument, nullptr, 'g'},
{"hexagon_delegate", required_argument, nullptr, 'j'},
{"xnnpack_delegate", required_argument, nullptr, 'x'},
{"help", no_argument, nullptr, 'h'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
c = getopt_long(argc, argv, "a:b:c:d:e:f:g:i:j:l:m:p:r:s:t:v:w:x:h",
long_options, &option_index);
if (c == -1) break;
switch (c) {
case 'a':
s.accel = strtol(optarg, nullptr, 10);
break;
case 'b':
s.input_mean = strtod(optarg, nullptr);
break;
case 'c':
s.loop_count =
strtol(optarg, nullptr, 10);
break;
case 'e':
s.max_profiling_buffer_entries =
strtol(optarg, nullptr, 10);
break;
case 'f':
s.allow_fp16 =
strtol(optarg, nullptr, 10);
break;
case 'g':
s.gl_backend =
strtol(optarg, nullptr, 10);
break;
case 'i':
s.input_bmp_name = optarg;
break;
case 'j':
s.hexagon_delegate =
strtol(optarg, nullptr, 10);
break;
case 'l':
s.labels_file_name = optarg;
break;
case 'm':
s.model_name = optarg;
break;
case 'p':
s.profiling =
strtol(optarg, nullptr, 10);
break;
case 'r':
s.number_of_results =
strtol(optarg, nullptr, 10);
break;
case 's':
s.input_std = strtod(optarg, nullptr);
break;
case 't':
s.number_of_threads = strtol(
optarg, nullptr, 10);
break;
case 'v':
s.verbose =
strtol(optarg, nullptr, 10);
break;
case 'w':
s.number_of_warmup_runs =
strtol(optarg, nullptr, 10);
break;
case 'x':
s.xnnpack_delegate =
strtol(optarg, nullptr, 10);
break;
case 'h':
case '?':
display_usage(delegate_providers);
exit(-1);
default:
exit(-1);
}
}
delegate_providers.MergeSettingsIntoParams(s);
RunInference(&s, delegate_providers);
return 0;
}
}
}
int main(int argc, char** argv) {
return tflite::label_image::Main(argc, argv);
} | #include "tensorflow/lite/examples/label_image/label_image.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
#include "tensorflow/lite/examples/label_image/get_top_n.h"
namespace tflite {
namespace label_image {
TEST(LabelImageTest, GraceHopper) {
std::string lena_file =
"tensorflow/lite/examples/label_image/testdata/"
"grace_hopper.bmp";
int height, width, channels;
Settings s;
s.input_type = kTfLiteUInt8;
std::vector<uint8_t> input =
read_bmp(lena_file, &width, &height, &channels, &s);
ASSERT_EQ(height, 606);
ASSERT_EQ(width, 517);
ASSERT_EQ(channels, 3);
std::vector<uint8_t> output(606 * 517 * 3);
resize<uint8_t>(output.data(), input.data(), 606, 517, 3, 214, 214, 3, &s);
ASSERT_EQ(output[0], 0x15);
ASSERT_EQ(output[214 * 214 * 3 - 1], 0x11);
}
TEST(LabelImageTest, GetTopN) {
uint8_t in[] = {1, 1, 2, 2, 4, 4, 16, 32, 128, 64};
std::vector<std::pair<float, int>> top_results;
get_top_n<uint8_t>(in, 10, 5, 0.025, &top_results, kTfLiteUInt8);
ASSERT_EQ(top_results.size(), 4);
ASSERT_EQ(top_results[0].second, 8);
}
}
} |
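// Example invocation, kept as a comment sketch because RunInference's
// DelegateProviders helper is file-local; every flag below is one documented by
// display_usage above, and the file names are just placeholders.
//
//   label_image --tflite_model mobilenet_quant_v1_224.tflite \
//               --image grace_hopper.bmp --labels labels.txt \
//               --threads 4 --count 10 --warmup_runs 2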
722 | cpp | tensorflow/tensorflow | resource_variable | tensorflow/lite/experimental/resource/resource_variable.cc | tensorflow/lite/experimental/resource/resource_variable_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
namespace tflite {
namespace resource {
class ResourceVariable : public ResourceBase {
public:
ResourceVariable();
ResourceVariable(ResourceVariable&& other);
ResourceVariable(const ResourceVariable&) = delete;
ResourceVariable& operator=(const ResourceVariable&) = delete;
~ResourceVariable() override;
TfLiteStatus AssignFrom(const TfLiteTensor* tensor);
TfLiteTensor* GetTensor() { return is_initialized_ ? &tensor_ : nullptr; }
bool IsInitialized() override { return is_initialized_; }
size_t GetMemoryUsage() override {
return is_initialized_ ? tensor_.bytes : 0;
}
protected:
TfLiteTensor tensor_;
bool is_initialized_ = false;
};
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
int resource_id);
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id);
bool IsBuiltinResource(const TfLiteTensor* tensor);
}
}
#endif
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace resource {
ResourceVariable::ResourceVariable() {
memset(&tensor_, 0, sizeof(TfLiteTensor));
}
ResourceVariable::ResourceVariable(ResourceVariable&& other) {
tensor_ = other.tensor_;
is_initialized_ = other.is_initialized_;
memset(&other.tensor_, 0, sizeof(TfLiteTensor));
other.is_initialized_ = false;
}
ResourceVariable::~ResourceVariable() {
if (is_initialized_) {
free(tensor_.data.raw);
if (tensor_.dims) {
TfLiteIntArrayFree(tensor_.dims);
}
}
}
TfLiteStatus ResourceVariable::AssignFrom(const TfLiteTensor* tensor) {
char* old_raw = tensor_.data.raw;
size_t old_bytes = tensor_.bytes;
TfLiteIntArray* old_dims = tensor_.dims;
memset(&tensor_, 0, sizeof(tensor_));
tensor_.name = "ResourceVariable";
tensor_.allocation_type = kTfLiteDynamic;
tensor_.type = tensor->type;
tensor_.params = tensor->params;
tensor_.quantization = tensor->quantization;
if (TfLiteIntArrayEqual(old_dims, tensor->dims)) {
tensor_.dims = old_dims;
} else {
TfLiteIntArrayFree(old_dims);
tensor_.dims = TfLiteIntArrayCopy(tensor->dims);
}
tensor_.data.raw = old_raw;
if (old_bytes != tensor->bytes) {
TfLiteTensorRealloc(tensor->bytes, &tensor_);
} else {
tensor_.bytes = old_bytes;
}
memcpy(tensor_.data.raw, tensor->data.raw, tensor_.bytes);
is_initialized_ = true;
return kTfLiteOk;
}
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
int resource_id) {
if (resources->count(resource_id) != 0) {
return;
}
resources->emplace(resource_id, std::make_unique<ResourceVariable>());
}
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id) {
auto it = resources->find(resource_id);
if (it != resources->end()) {
return static_cast<ResourceVariable*>(it->second.get());
}
return nullptr;
}
bool IsBuiltinResource(const TfLiteTensor* tensor) {
return tensor && tensor->type == kTfLiteResource &&
tensor->delegate == nullptr;
}
}
} | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace resource {
void InitTensor(const std::vector<int>& shape, TfLiteAllocationType alloc_type,
float default_value, TfLiteTensor* tensor) {
memset(tensor, 0, sizeof(TfLiteTensor));
int num_elements = 1;
for (auto dim : shape) num_elements *= dim;
if (shape.empty()) num_elements = 0;
float* buf = static_cast<float*>(malloc(sizeof(float) * num_elements));
for (int i = 0; i < num_elements; ++i) buf[i] = default_value;
const int bytes = num_elements * sizeof(buf[0]);
auto* dims = ConvertArrayToTfLiteIntArray(shape.size(), shape.data());
TfLiteTensorReset(TfLiteType::kTfLiteFloat32, nullptr, dims, {},
reinterpret_cast<char*>(buf), bytes, alloc_type, nullptr,
false, tensor);
}
TEST(ResourceTest, NonDynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, DynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteDynamic, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, AssignSameSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {1};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(ResourceTest, AssignDifferentSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {2};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
EXPECT_EQ(1, value->dims->size);
EXPECT_EQ(1, value->dims->data[0]);
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float) * 2, value->bytes);
ASSERT_THAT(value, DimsAre({2}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(IsBuiltinResource, IsBuiltinResourceTest) {
TfLiteTensor tensor;
tensor.type = kTfLiteResource;
tensor.delegate = nullptr;
EXPECT_TRUE(IsBuiltinResource(&tensor));
EXPECT_FALSE(IsBuiltinResource(nullptr));
tensor.type = kTfLiteFloat32;
EXPECT_FALSE(IsBuiltinResource(&tensor));
tensor.type = kTfLiteResource;
TfLiteDelegate delegate;
tensor.delegate = &delegate;
EXPECT_FALSE(IsBuiltinResource(&tensor));
}
TEST(ResourceTest, GetMemoryUsage) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {100};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(100 * sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({100}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(100 * sizeof(float), var.GetMemoryUsage());
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
}
} |
723 | cpp | tensorflow/tensorflow | cache_buffer | tensorflow/lite/experimental/resource/cache_buffer.cc | tensorflow/lite/experimental/resource/cache_buffer_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_CACHE_BUFFER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_CACHE_BUFFER_H_
#include <memory>
#include <unordered_map>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace resource {
class CacheBuffer : public ResourceVariable {
public:
CacheBuffer() = default;
CacheBuffer(const CacheBuffer &) = delete;
~CacheBuffer() override;
CacheBuffer &operator=(const CacheBuffer &) = delete;
TfLiteStatus Initialize(const TfLiteIntArray &shape);
size_t GetNumEntries(int idx) const;
float *GetBuffer();
size_t GetSize();
void SetNumEntries(int idx, size_t count);
private:
std::unique_ptr<size_t[]> num_entries_;
std::unique_ptr<float[]> buffer_;
  TfLiteIntArray *dims_ = nullptr;  // Owned; freed in the destructor.
};
}
}
#endif
#include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include <cstdlib>
#include <cstring>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace resource {
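// Allocates a zero-filled float buffer of NumElements(shape) elements plus one
// entry counter per index along dimension 1 of `shape`; dimension 2 is the
// per-index capacity that SetNumEntries() DCHECKs against.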
TfLiteStatus CacheBuffer::Initialize(const TfLiteIntArray& shape) {
dims_ = TfLiteIntArrayCopy(&shape);
const size_t buf_size = NumElements(&shape);
buffer_.reset(new float[buf_size]);
memset(buffer_.get(), 0, sizeof(float) * buf_size);
num_entries_.reset(new size_t[shape.data[1]]);
memset(num_entries_.get(), 0, sizeof(size_t) * shape.data[1]);
is_initialized_ = true;
return kTfLiteOk;
}
size_t CacheBuffer::GetSize() { return sizeof(float) * NumElements(dims_); }
size_t CacheBuffer::GetNumEntries(int idx) const { return num_entries_[idx]; }
CacheBuffer::~CacheBuffer() { TfLiteIntArrayFree(dims_); }
float* CacheBuffer::GetBuffer() { return buffer_.get(); }
void CacheBuffer::SetNumEntries(int idx, size_t count) {
TFLITE_DCHECK(count <= dims_->data[2]);
num_entries_[idx] = count;
}
}
} | #include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace resource {
TEST(CacheBufferTest, Initialize) {
TfLiteIntArray* shape = TfLiteIntArrayCreate(4);
shape->data[0] = 1;
shape->data[1] = 3;
shape->data[2] = 5;
shape->data[3] = 7;
CacheBuffer cache_buffer;
cache_buffer.Initialize(*shape);
EXPECT_EQ(cache_buffer.GetSize(), 420);
ASSERT_NE(cache_buffer.GetBuffer(), nullptr);
EXPECT_EQ(cache_buffer.GetNumEntries(0), 0);
EXPECT_EQ(cache_buffer.GetNumEntries(1), 0);
EXPECT_EQ(cache_buffer.GetNumEntries(2), 0);
cache_buffer.SetNumEntries(0, 3);
EXPECT_EQ(cache_buffer.GetNumEntries(0), 3);
TfLiteIntArrayFree(shape);
}
}
} |
724 | cpp | tensorflow/tensorflow | audio_microfrontend | tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_AUDIO_MICROFRONTEND_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_AUDIO_MICROFRONTEND_H_
#include "tensorflow/lite/context.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_AUDIO_MICROFRONTEND();
}
}
}
#endif
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_microfrontend {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
typedef struct {
int sample_rate;
FrontendState* state;
int left_context;
int right_context;
int frame_stride;
bool zero_padding;
int out_scale;
bool out_float;
} TfLiteAudioMicrofrontendParams;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new TfLiteAudioMicrofrontendParams;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
data->sample_rate = m["sample_rate"].AsInt32();
struct FrontendConfig config;
config.window.size_ms = m["window_size"].AsInt32();
config.window.step_size_ms = m["window_step"].AsInt32();
config.filterbank.num_channels = m["num_channels"].AsInt32();
config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat();
config.filterbank.lower_band_limit = m["lower_band_limit"].AsFloat();
config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32();
config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat();
config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat();
config.noise_reduction.min_signal_remaining =
m["min_signal_remaining"].AsFloat();
config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool();
config.pcan_gain_control.strength = m["pcan_strength"].AsFloat();
config.pcan_gain_control.offset = m["pcan_offset"].AsFloat();
config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32();
config.log_scale.enable_log = m["enable_log"].AsBool();
config.log_scale.scale_shift = m["scale_shift"].AsInt32();
data->state = new FrontendState;
FrontendPopulateState(&config, data->state, data->sample_rate);
data->left_context = m["left_context"].AsInt32();
data->right_context = m["right_context"].AsInt32();
data->frame_stride = m["frame_stride"].AsInt32();
data->zero_padding = m["zero_padding"].AsBool();
data->out_scale = m["out_scale"].AsInt32();
data->out_float = m["out_float"].AsBool();
return data;
}
void Free(TfLiteContext* context, void* buffer) {
auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer);
FrontendFreeStateContents(data->state);
delete data->state;
delete data;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
output->type = kTfLiteInt32;
if (data->out_float) {
output->type = kTfLiteFloat32;
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
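  // One output row per anchor frame: frames advance by window.step over the
  // input and are subsampled by frame_stride; each row holds num_channels
  // features for the anchor frame plus its left/right context frames.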
int num_frames = 0;
if (input->dims->data[0] >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step / data->frame_stride +
1;
}
output_size->data[0] = num_frames;
output_size->data[1] = data->state->filterbank.num_channels *
(1 + data->left_context + data->right_context);
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
void GenerateFeatures(TfLiteAudioMicrofrontendParams* data,
const TfLiteTensor* input, TfLiteTensor* output) {
const int16_t* audio_data = GetTensorData<int16_t>(input);
int64_t audio_size = input->dims->data[0];
T* filterbanks_flat = GetTensorData<T>(output);
int num_frames = 0;
if (audio_size >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step +
1;
}
std::vector<std::vector<T>> frame_buffer(num_frames);
int frame_index = 0;
while (audio_size > 0) {
size_t num_samples_read;
struct FrontendOutput output = FrontendProcessSamples(
data->state, audio_data, audio_size, &num_samples_read);
audio_data += num_samples_read;
audio_size -= num_samples_read;
if (output.values != nullptr) {
frame_buffer[frame_index].reserve(output.size);
int i;
for (i = 0; i < output.size; ++i) {
frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) /
data->out_scale);
}
++frame_index;
}
}
int index = 0;
std::vector<T> pad(data->state->filterbank.num_channels, 0);
int anchor;
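  // Assemble the output row by row: for each anchor frame (stepping by
  // frame_stride) emit its left-context, anchor, and right-context frames,
  // using zero padding at the edges when zero_padding is set and otherwise
  // clamping to the first/last available frame.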
for (anchor = 0; anchor < frame_buffer.size(); anchor += data->frame_stride) {
int frame;
for (frame = anchor - data->left_context;
frame <= anchor + data->right_context; ++frame) {
std::vector<T>* feature;
if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) {
feature = &pad;
} else if (frame < 0) {
feature = &frame_buffer[0];
} else if (frame >= frame_buffer.size()) {
feature = &frame_buffer[frame_buffer.size() - 1];
} else {
feature = &frame_buffer[frame];
}
for (auto f : *feature) {
filterbanks_flat[index++] = f;
}
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
FrontendReset(data->state);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (data->out_float) {
GenerateFeatures<float>(data, input, output);
} else {
GenerateFeatures<int32>(data, input, output);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_AUDIO_MICROFRONTEND() {
static TfLiteRegistration r = {
audio_microfrontend::Init, audio_microfrontend::Free,
audio_microfrontend::Prepare, audio_microfrontend::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/experimental/microfrontend/audio_microfrontend.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using ::testing::ElementsAreArray;
class MicroFrontendOpModel : public SingleOpModel {
public:
MicroFrontendOpModel(int n_input, int n_frame, int n_frequency_per_frame,
int n_left_context, int n_right_context,
int n_frame_stride,
const std::vector<std::vector<int>>& input_shapes)
: n_input_(n_input),
n_frame_(n_frame),
n_frequency_per_frame_(n_frequency_per_frame),
n_left_context_(n_left_context),
n_right_context_(n_right_context),
n_frame_stride_(n_frame_stride) {
input_ = AddInput(TensorType_INT16);
output_ = AddOutput(TensorType_INT32);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("sample_rate", 1000);
fbb.Int("window_size", 25);
fbb.Int("window_step", 10);
fbb.Int("num_channels", 2);
fbb.Float("upper_band_limit", 450.0);
fbb.Float("lower_band_limit", 8.0);
fbb.Int("smoothing_bits", 10);
fbb.Float("even_smoothing", 0.025);
fbb.Float("odd_smoothing", 0.06);
fbb.Float("min_signal_remaining", 0.05);
fbb.Bool("enable_pcan", true);
fbb.Float("pcan_strength", 0.95);
fbb.Float("pcan_offset", 80.0);
fbb.Int("gain_bits", 21);
fbb.Bool("enable_log", true);
fbb.Int("scale_shift", 6);
fbb.Int("left_context", n_left_context);
fbb.Int("right_context", n_right_context);
fbb.Int("frame_stride", n_frame_stride);
fbb.Bool("zero_padding", true);
fbb.Int("out_scale", 1);
fbb.Bool("out_float", false);
});
fbb.Finish();
SetCustomOp("MICRO_FRONTEND", fbb.GetBuffer(),
Register_AUDIO_MICROFRONTEND);
BuildInterpreter(input_shapes);
}
void SetInput(const std::vector<int16_t>& data) {
PopulateTensor(input_, data);
}
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
int num_inputs() { return n_input_; }
int num_frmes() { return n_frame_; }
int num_frequency_per_frame() { return n_frequency_per_frame_; }
int num_left_context() { return n_left_context_; }
int num_right_context() { return n_right_context_; }
int num_frame_stride() { return n_frame_stride_; }
protected:
int input_;
int output_;
int n_input_;
int n_frame_;
int n_frequency_per_frame_;
int n_left_context_;
int n_right_context_;
int n_frame_stride_;
};
class BaseMicroFrontendTest : public ::testing::Test {
protected:
std::vector<int16_t> micro_frontend_input_;
void VerifyGoldens(const std::vector<int16_t>& input,
const std::vector<std::vector<int>>& output,
MicroFrontendOpModel* micro_frontend,
float tolerance = 1e-5) {
const int num_inputs = micro_frontend->num_inputs();
EXPECT_GT(num_inputs, 0);
const int num_frames = micro_frontend->num_frmes();
EXPECT_GT(num_frames, 0);
EXPECT_EQ(num_frames, output.size());
const int num_frequency_per_frame =
micro_frontend->num_frequency_per_frame();
EXPECT_GT(num_frequency_per_frame, 0);
EXPECT_EQ(num_frequency_per_frame, output[0].size());
micro_frontend->SetInput(input);
ASSERT_EQ(micro_frontend->Invoke(), kTfLiteOk);
std::vector<int> output_flattened;
int anchor;
for (anchor = 0; anchor < output.size();
anchor += micro_frontend->num_frame_stride()) {
int frame;
for (frame = anchor - micro_frontend->num_left_context();
frame <= anchor + micro_frontend->num_right_context(); ++frame) {
if (frame < 0 || frame >= output.size()) {
int j;
for (j = 0; j < num_frequency_per_frame; ++j) {
output_flattened.push_back(0.0);
}
} else {
for (auto data_point : output[frame]) {
output_flattened.push_back(data_point);
}
}
}
}
EXPECT_THAT(micro_frontend->GetOutput(),
ElementsAreArray(output_flattened));
}
};
class TwoConsecutive36InputsMicroFrontendTest : public BaseMicroFrontendTest {
void SetUp() override {
micro_frontend_input_ = {
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};
}
};
TEST_F(TwoConsecutive36InputsMicroFrontendTest, MicroFrontendBlackBoxTest) {
const int n_input = 36;
const int n_frame = 2;
const int n_frequency_per_frame = 2;
MicroFrontendOpModel micro_frontend(n_input, n_frame, n_frequency_per_frame,
1, 1, 1,
{
{n_input},
});
const std::vector<std::vector<int>> micro_frontend_golden_output = {
{479, 425}, {436, 378}};
VerifyGoldens(micro_frontend_input_, micro_frontend_golden_output,
                &micro_frontend);
}
}
}
}
} |
725 | cpp | tensorflow/tensorflow | fft | third_party/ducc/fft.cc | third_party/xla/xla/tests/fft_test.cc | #ifndef FFT2D_FFT_H__
#define FFT2D_FFT_H__
#ifdef __cplusplus
extern "C" {
#endif
extern void cdft(int, int, double *, int *, double *);
extern void rdft(int, int, double *, int *, double *);
extern void ddct(int, int, double *, int *, double *);
extern void ddst(int, int, double *, int *, double *);
extern void dfct(int, double *, double *, int *, double *);
extern void dfst(int, double *, double *, int *, double *);
#ifdef __cplusplus
}
#endif
#endif
#include "ducc/google/fft.h"
#include <complex>
#include <cstddef>
#include <cstdlib>
#include <exception>
#include <iostream>
#include <ostream>
#include <vector>
#include "ducc/google/threading.h"
#include "ducc/src/ducc0/fft/fft.h"
#include "ducc/src/ducc0/fft/fft1d_impl.h"
#include "ducc/src/ducc0/fft/fftnd_impl.h"
#include "ducc/src/ducc0/infra/mav.h"
#include "ducc/src/ducc0/infra/threading.h"
#include "unsupported/Eigen/CXX11/ThreadPool"
namespace ducc0 {
namespace google {
using Shape = std::vector<std::size_t>;
using Stride = std::vector<std::ptrdiff_t>;
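// Out-of-place complex-to-complex FFT along `axes`. Runs single-threaded when
// no thread pool is supplied, otherwise uses the given Eigen pool; any DUCC
// exception is logged and aborts the process. r2c and c2r below follow the
// same pattern.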
template <typename RealScalar>
void c2c(const std::complex<RealScalar>* in, const Shape& in_shape,
const Stride& in_stride, std::complex<RealScalar>* out,
const Shape& out_shape, const Stride& out_stride, const Shape& axes,
bool forward, RealScalar scale,
Eigen::ThreadPoolInterface* thread_pool) {
ducc0::cfmav<std::complex<RealScalar>> m_in(in, in_shape, in_stride);
ducc0::vfmav<std::complex<RealScalar>> m_out(out, out_shape, out_stride);
try {
if (thread_pool == nullptr) {
ducc0::google::NoThreadPool no_thread_pool;
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
no_thread_pool);
ducc0::c2c(m_in, m_out, axes, forward, scale, 1);
} else {
EigenThreadPool eigen_thread_pool(*thread_pool);
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
eigen_thread_pool);
ducc0::c2c(m_in, m_out, axes, forward, scale,
eigen_thread_pool.nthreads());
}
} catch (const std::exception& ex) {
std::cerr << "DUCC FFT c2c failed: " << ex.what() << std::endl;
std::abort();
}
}
template <typename RealScalar>
void r2c(const RealScalar* in, const Shape& in_shape, const Stride& in_stride,
std::complex<RealScalar>* out, const Shape& out_shape,
const Stride& out_stride, const Shape& axes, bool forward,
RealScalar scale, Eigen::ThreadPoolInterface* thread_pool) {
ducc0::cfmav<RealScalar> m_in(in, in_shape, in_stride);
ducc0::vfmav<std::complex<RealScalar>> m_out(out, out_shape, out_stride);
try {
if (thread_pool == nullptr) {
ducc0::google::NoThreadPool no_thread_pool;
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
no_thread_pool);
ducc0::r2c(m_in, m_out, axes, forward, scale, 1);
} else {
EigenThreadPool eigen_thread_pool(*thread_pool);
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
eigen_thread_pool);
ducc0::r2c(m_in, m_out, axes, forward, scale,
eigen_thread_pool.nthreads());
}
} catch (const std::exception& ex) {
std::cerr << "DUCC FFT r2c failed: " << ex.what() << std::endl;
std::abort();
}
}
template <typename RealScalar>
void c2r(const std::complex<RealScalar>* in, const Shape& in_shape,
const Stride& in_stride, RealScalar* out, const Shape& out_shape,
const Stride& out_stride, const Shape& axes, bool forward,
RealScalar scale, Eigen::ThreadPoolInterface* thread_pool) {
ducc0::cfmav<std::complex<RealScalar>> m_in(in, in_shape, in_stride);
ducc0::vfmav<RealScalar> m_out(out, out_shape, out_stride);
try {
if (thread_pool == nullptr) {
ducc0::google::NoThreadPool no_thread_pool;
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
no_thread_pool);
ducc0::c2r(m_in, m_out, axes, forward, scale, 1);
} else {
EigenThreadPool eigen_thread_pool(*thread_pool);
ducc0::detail_threading::ScopedUseThreadPool thread_pool_guard(
eigen_thread_pool);
ducc0::c2r(m_in, m_out, axes, forward, scale,
eigen_thread_pool.nthreads());
}
} catch (const std::exception& ex) {
std::cerr << "DUCC FFT c2r failed: " << ex.what() << std::endl;
std::abort();
}
}
#define FFT_DEFINITIONS(RealScalar) \
template void c2c<RealScalar>( \
const std::complex<RealScalar>* in, const Shape& in_shape, \
const Stride& in_stride, std::complex<RealScalar>* out, \
const Shape& out_shape, const Stride& out_stride, const Shape& axes, \
bool forward, RealScalar scale, \
Eigen::ThreadPoolInterface* thread_pool); \
template void r2c<RealScalar>( \
const RealScalar* in, const Shape& in_shape, const Stride& in_stride, \
std::complex<RealScalar>* out, const Shape& out_shape, \
const Stride& out_stride, const Shape& axes, bool forward, \
RealScalar scale, Eigen::ThreadPoolInterface* thread_pool); \
template void c2r(const std::complex<RealScalar>* in, const Shape& in_shape, \
const Stride& in_stride, RealScalar* out, \
const Shape& out_shape, const Stride& out_stride, \
const Shape& axes, bool forward, RealScalar scale, \
Eigen::ThreadPoolInterface* thread_pool)
FFT_DEFINITIONS(float);
FFT_DEFINITIONS(double);
#undef FFT_DEFINITIONS
}
} | #include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
namespace xla {
namespace {
class FftTextTest : public HloTestBase {};
XLA_TEST_F(FftTextTest, Fft) {
absl::string_view hlo_string = R"(
HloModule Fft_module
ENTRY Fft {
input = c64[8,32]{0,1} parameter(0)
fft = c64[8,32] fft(input), fft_type=FFT, fft_length={32}
ROOT transpose = c64[32,8] transpose(fft), dimensions={1,0}
})";
EXPECT_TRUE(RunAndCompare(hlo_string, ErrorSpec{4e-3, 4e-3}));
}
}
} |
726 | cpp | tensorflow/tensorflow | canonicalize_value | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#include <string>
#include "absl/strings/string_view.h"
namespace tflite::acceleration {
std::string CanonicalizeValue(absl::string_view value);
std::string CanonicalizeValueWithKey(absl::string_view key,
absl::string_view value);
}
#endif
#include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <iterator>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
inline char ascii_normalise(const unsigned char c) {
if (c == ' ' || c == '-') {
return '_';
}
return absl::ascii_tolower(c);
}
}
std::string CanonicalizeValue(absl::string_view value) {
std::string output;
absl::c_transform(value, std::back_inserter(output),
tflite::acceleration::ascii_normalise);
return output;
}
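// Like CanonicalizeValue(), but for the GPU-model key additionally strips the
// trailing driver/API version from ANGLE-on-Vulkan Samsung Xclipse strings.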
std::string CanonicalizeValueWithKey(absl::string_view key,
absl::string_view value) {
std::string output = CanonicalizeValue(value);
std::string gpu_output;
return key == kGPUModel &&
RE2::FullMatch(
output,
R"((angle_\(samsung_xclipse_[0-9]*\)_on_vulkan).*$)",
&gpu_output)
? gpu_output
: output;
}
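// Usage sketch (illustrative inputs, not taken from this file):
//   CanonicalizeValue("Exynos 990")  -> "exynos_990"
//   CanonicalizeValueWithKey(
//       kGPUModel, "ANGLE (Samsung Xclipse 920) on Vulkan 1.1.179")
//     -> "angle_(samsung_xclipse_920)_on_vulkan"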
} | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
TEST(CanonicalizeValue, CharactersAreLowercased) {
EXPECT_EQ(CanonicalizeValue("hElLo"), "hello");
}
TEST(CanonicalizeValue, HyphensAreReplaced) {
EXPECT_EQ(CanonicalizeValue("-"), "_");
}
TEST(CanonicalizeValue, SpacesAreReplaced) {
EXPECT_EQ(CanonicalizeValue(" "), "_");
}
TEST(CanonicalizeValue, OtherSpecialCharactersAreUnaffected) {
for (unsigned char c = 0; c < 65; ++c) {
if (c == ' ' || c == '-') continue;
std::string s = {1, static_cast<char>(c)};
EXPECT_EQ(CanonicalizeValue(s), s);
}
}
TEST(CanonicalizeValue, SamsungXclipseGpuNormalized) {
EXPECT_EQ(CanonicalizeValueWithKey(
kGPUModel, "ANGLE (Samsung Xclipse 920) on Vulkan 1.1.179"),
"angle_(samsung_xclipse_920)_on_vulkan");
}
}
} |
727 | cpp | tensorflow/tensorflow | devicedb | tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DEVICEDB_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DEVICEDB_H_
#include <map>
#include <string>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
void UpdateVariablesFromDatabase(
std::map<std::string, std::string>* variable_values,
const DeviceDatabase& database);
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
namespace {
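// Returns the edges of `root` selected by `value`: an exact key lookup for
// EQUAL nodes, otherwise a lexicographic string comparison over all edges
// (value >= edge value for MINIMUM, value <= edge value for MAXIMUM).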
std::vector<const DeviceDecisionTreeEdge*> Find(
const DeviceDecisionTreeNode* root, const std::string& value) {
std::vector<const DeviceDecisionTreeEdge*> found;
if (root->comparison() == Comparison_EQUAL) {
const DeviceDecisionTreeEdge* possible =
root->items()->LookupByKey(value.c_str());
if (possible) {
found.push_back(possible);
}
} else {
for (const DeviceDecisionTreeEdge* item : *(root->items())) {
if ((root->comparison() == Comparison_MINIMUM)
? value >= item->value()->str()
: value <= item->value()->str()) {
found.push_back(item);
}
}
}
return found;
}
void UpdateVariablesFromDeviceDecisionTreeEdges(
std::map<std::string, std::string>* variable_values,
const DeviceDecisionTreeEdge& item) {
if (item.derived_properties()) {
for (const DerivedProperty* p : *(item.derived_properties())) {
(*variable_values)[p->variable()->str()] = p->value()->str();
}
}
}
void Follow(const DeviceDecisionTreeNode* root,
std::map<std::string, std::string>* variable_values) {
if (!root->variable()) {
return;
}
auto possible_value = variable_values->find(root->variable()->str());
if (possible_value == variable_values->end()) {
return;
}
std::vector<const DeviceDecisionTreeEdge*> edges =
Find(root, possible_value->second);
for (const DeviceDecisionTreeEdge* edge : edges) {
UpdateVariablesFromDeviceDecisionTreeEdges(variable_values, *edge);
if (edge->children()) {
for (const DeviceDecisionTreeNode* root : *(edge->children())) {
Follow(root, variable_values);
}
}
}
}
}
void UpdateVariablesFromDatabase(
std::map<std::string, std::string>* variable_values,
const DeviceDatabase& database) {
if (!database.root()) return;
for (const DeviceDecisionTreeNode* root : *(database.root())) {
Follow(root, variable_values);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace acceleration {
namespace {
class DeviceDbTest : public ::testing::Test {
protected:
void LoadSample() {
device_db_ = flatbuffers::GetRoot<DeviceDatabase>(
g_tflite_acceleration_devicedb_sample_binary);
}
const DeviceDatabase* device_db_ = nullptr;
};
TEST_F(DeviceDbTest, Load) {
LoadSample();
ASSERT_TRUE(device_db_);
ASSERT_TRUE(device_db_->root());
EXPECT_EQ(device_db_->root()->size(), 4);
}
TEST_F(DeviceDbTest, SocLookup) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7872");
variables.clear();
variables[kDeviceModel] = "sc_02l";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7885");
variables.clear();
variables[kDeviceModel] = "nosuch";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(kSoCModel), variables.end());
}
TEST_F(DeviceDbTest, StatusLookupWithSoC) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7872";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
variables[kOpenGLESVersion] = "3.0";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables.clear();
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7883";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables[kAndroidSdkVersion] = "29";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithDevice) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810f";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables.clear();
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810m";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithMaximumComparison) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "shiraz_ag_2011";
variables[kAndroidSdkVersion] = "28";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "27";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "29";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
}
}
}
} |
728 | cpp | tensorflow/tensorflow | gpu_compatibility | tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.cc | tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_VERSIONING_GPU_COMPATIBILITY_H_
#define TENSORFLOW_LITE_TOOLS_VERSIONING_GPU_COMPATIBILITY_H_
#include "absl/status/status.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
enum class GpuCompatibilityFlags {
kStandard = 0,
kEnhancedBroadcast = 1 << 0,
};
absl::Status CheckGpuDelegateCompatibility(
const OpSignature& op_sig,
GpuCompatibilityFlags flags = GpuCompatibilityFlags::kStandard);
absl::Status CheckGpuDelegateCompatibility(const OperatorCode* op_code,
const Operator* op,
const SubGraph* subgraph,
const Model* model);
absl::Status CheckGpuDelegateCompatibility(
const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration,
GpuCompatibilityFlags flags = GpuCompatibilityFlags::kStandard);
}
#endif
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
const std::string GetOpName(const OpSignature& op_sig) {
if (op_sig.op == tflite::BuiltinOperator_CUSTOM) {
return op_sig.custom_name;
}
return tflite::EnumNamesBuiltinOperator()[op_sig.op];
}
int NumElements(const std::vector<int32_t>& dims) {
int count = 1;
for (int i = 0; i < dims.size(); ++i) {
count *= dims.at(i);
}
return count;
}
#define RETURN_IF_ERROR(s) \
{ \
auto c = (s); \
if (!c.ok()) return c; \
}
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
switch (fused_activation) {
case kTfLiteActNone:
case kTfLiteActRelu:
case kTfLiteActReluN1To1:
case kTfLiteActRelu6:
case kTfLiteActTanh:
case kTfLiteActSigmoid:
return absl::OkStatus();
case kTfLiteActSignBit:
return absl::UnimplementedError(
"TfLiteFusedActivation.kTfLiteActSignBit");
}
}
int GetNumberOfRuntimeInputs(const OpSignature& op_sig) {
int number_of_runtime_inputs = 0;
for (auto& input : op_sig.inputs) {
if (!input.is_const && input.type != kTfLiteNoType) {
number_of_runtime_inputs++;
}
}
return number_of_runtime_inputs;
}
absl::Status CheckInputsOutputs(const OpSignature& op_sig,
const int required_runtime_inputs,
const int required_outputs) {
const int runtime_inputs_from_model = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs_from_model != required_runtime_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_runtime_inputs,
" runtime input tensor(s), but node has ",
runtime_inputs_from_model, " runtime input(s)."));
}
const int outputs_from_model = op_sig.outputs.size();
if (outputs_from_model != required_outputs) {
return absl::InternalError(absl::StrCat("Expected ", required_outputs,
" output tensor(s), but node has ",
outputs_from_model, " output(s)."));
}
return absl::OkStatus();
}
absl::Status CheckInputsConstsOutputs(const OpSignature& op_sig,
int required_runtime_inputs,
int required_const_inputs,
int required_outputs) {
int const_inputs_from_model = 0;
for (auto& input : op_sig.inputs) {
if (input.is_const) {
++const_inputs_from_model;
}
}
if (const_inputs_from_model != required_const_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_const_inputs,
" const input tensor(s), but node has ",
const_inputs_from_model, " const input(s)."));
}
return CheckInputsOutputs(op_sig, required_runtime_inputs, required_outputs);
}
absl::Status CheckTensorIsAvailable(const OpSignature& op_sig, int idx) {
if (idx >= op_sig.inputs.size()) {
return absl::OutOfRangeError(
absl::StrCat("Requested index goes beyond array size: ", idx, " vs ",
op_sig.inputs.size()));
}
return absl::OkStatus();
}
absl::Status CheckConvoultionInputOutput(const OpSignature& op_sig) {
const int runtime_inputs = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs > 2) {
return absl::InternalError(
absl::StrCat("Expected 1 or 2 input tensor(s), but node has ",
runtime_inputs, " runtime inputs."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor(s), but node has ",
runtime_outputs, " runtime outputs."));
}
if (runtime_inputs == 1) {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
}
return absl::OkStatus();
}
absl::Status CheckStrides(int strides_h, int strides_w) {
if (strides_h <= 0 || strides_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect stride values: stride_height = ", strides_h,
", stride_width = ", strides_w));
}
return absl::OkStatus();
}
absl::Status CheckDilation(int dilation_h, int dilation_w) {
if (dilation_h <= 0 || dilation_w <= 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Incorrect dilation values: dilation_height = ", dilation_h,
", dilation_width = ", dilation_w));
}
return absl::OkStatus();
}
absl::Status CheckStridesAndDilation(int strides_h, int strides_w,
int dilation_h, int dilation_w) {
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
RETURN_IF_ERROR(CheckDilation(dilation_h, dilation_w));
return absl::OkStatus();
}
absl::Status CheckKernels(int kernel_h, int kernel_w) {
if (kernel_h <= 0 || kernel_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect kernel values: kernel_height = ", kernel_h,
", kernel_width = ", kernel_w));
}
return absl::OkStatus();
}
absl::Status CheckKernelsAndStrides(int kernel_h, int kernel_w, int strides_h,
int strides_w) {
RETURN_IF_ERROR(CheckKernels(kernel_h, kernel_w));
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
return absl::OkStatus();
}
absl::Status CheckAxesAreInt32Const(const OpSignature& op_sig, int idx) {
auto axes = op_sig.inputs.at(idx);
if (!axes.is_const) {
return absl::UnimplementedError(GetOpName(op_sig) +
" is only supported with constant axes.");
}
if (axes.type != kTfLiteInt32) {
return absl::UnimplementedError(absl::StrCat(
GetOpName(op_sig) + " supports int32 tensor for axes. But node has ",
TfLiteTypeGetName(axes.type)));
}
return absl::OkStatus();
}
absl::Status CheckPooling2DGpuDelegateCompatibility(const OpSignature& op_sig) {
const TfLitePoolParams* tf_options;
if (op_sig.custom_initial_data) {
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
2));
} else {
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
}
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return IsActivationSupported(tf_options->activation);
}
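// Depthwise conv is accepted only for 4D tensors where output channels equal
// input channels * depth_multiplier, any bias matches the output channel
// count, and depth_multiplier != 1 is allowed only when input channels == 1.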
absl::Status CheckDepthwiseConvGpuDelegateCompatibility(
const OpSignature& op_sig) {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteDepthwiseConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor, tf_options->dilation_width_factor));
RETURN_IF_ERROR(IsActivationSupported(tf_options->activation));
const int depth_multiplier = tf_options->depth_multiplier;
const auto* input = &op_sig.inputs[0];
const auto* filter = &op_sig.inputs[1];
const auto* bias = op_sig.inputs.size() > 2 ? &op_sig.inputs[2] : nullptr;
const auto* output = &op_sig.outputs[0];
if (input->dims.size() != 4) {
return absl::InvalidArgumentError("input.dims.size != 4");
}
if (filter->dims.size() != 4) {
return absl::InvalidArgumentError("filter.dims.size != 4");
}
if (output->dims.size() != 4) {
return absl::InvalidArgumentError("output.dims.size != 4");
}
if (input->dims[0] != output->dims[0]) {
return absl::InvalidArgumentError("input.b != output.b");
}
const int input_depth = input->dims[3];
const int output_depth = output->dims[3];
if (filter->dims[3] != output_depth) {
return absl::InvalidArgumentError("filter.i != output.c");
}
if (output_depth != input_depth * depth_multiplier) {
return absl::InvalidArgumentError("output.c != input.c * depth_multiplier");
}
if (bias && NumElements(bias->dims) != output_depth) {
return absl::InvalidArgumentError("bias.size != output.c");
}
if (depth_multiplier != 1 && input_depth != 1) {
return absl::UnimplementedError("depth_multiplier != 1 && input.c != 1");
}
return absl::OkStatus();
}
absl::Status CheckCumsumGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 2) {
return absl::InvalidArgumentError("Expects 2 inputs and 1 output");
}
auto error = absl::InvalidArgumentError(
"Input/output must be float type and indices must be constant int32 "
"type");
if ((op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.outputs.at(0).type != op_sig.inputs.at(0).type) ||
(op_sig.inputs.at(1).type != kTfLiteInt32 ||
!op_sig.inputs.at(1).is_const)) {
return error;
}
return absl::OkStatus();
}
absl::Status CheckOneHotGpuDelegateCompatibility(const OpSignature& op_sig) {
  if (op_sig.inputs.size() != 4 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expects 4 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Indices must be int32 type, on/off tensors must be constant, scalar, "
"float type, axis must be -1 or last dim");
if (op_sig.inputs[0].type != kTfLiteInt32) {
return error;
}
auto* one_hot_options =
reinterpret_cast<TfLiteOneHotParams*>(op_sig.builtin_data);
const int num_dims = op_sig.inputs[0].dims.size();
if (one_hot_options->axis != -1 &&
one_hot_options->axis != op_sig.inputs[0].dims[num_dims - 1]) {
return error;
}
for (int i = 0; i < num_dims - 1; ++i) {
if (num_dims > 3 && i == 0) {
continue;
}
if (op_sig.inputs.at(0).dims[i] != 1) {
return absl::InvalidArgumentError(
absl::StrCat("Unspported non-singleton dim at ", i));
}
}
if (op_sig.inputs.at(2).type != kTfLiteFloat32 ||
op_sig.inputs.at(3).type != kTfLiteFloat32) {
return error;
}
if (!op_sig.inputs.at(2).is_const || !op_sig.inputs.at(3).is_const ||
op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(3).dims.size() > 1) {
return error;
}
if ((!op_sig.inputs.at(2).dims.empty() && op_sig.inputs.at(2).dims[0] > 1) ||
(!op_sig.inputs.at(3).dims.empty() && op_sig.inputs.at(3).dims[0] > 1)) {
return error;
}
return absl::OkStatus();
}
absl::Status CheckSelectV2GpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 3 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expected 3 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Cond must be float or bool type, if, else tensors must be float and "
"either be same the shape as output or constant, scalar.");
if ((op_sig.inputs.at(0).type != kTfLiteBool &&
op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.inputs.at(1).type != kTfLiteFloat16 &&
op_sig.inputs.at(1).type != kTfLiteFloat32) ||
(op_sig.inputs.at(2).type != kTfLiteFloat16 &&
op_sig.inputs.at(2).type != kTfLiteFloat32)) {
return error;
}
std::vector<int32_t> output_dims = op_sig.outputs[0].dims;
if (!op_sig.inputs.at(1).dims.empty() &&
(op_sig.inputs.at(1).dims != output_dims) &&
(op_sig.inputs.at(1).dims.size() > 1 ||
op_sig.inputs.at(1).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(1).is_const && op_sig.inputs.at(1).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D if tensor only supported if constant.");
}
if (!op_sig.inputs.at(2).dims.empty() &&
(op_sig.inputs.at(2).dims != output_dims) &&
(op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(2).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(2).is_const && op_sig.inputs.at(2).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D else tensor only supported if constant.");
}
return absl::OkStatus();
}
absl::Status CheckCustomOpsGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.custom_name == "Convolution2DTransposeBias") {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(
CheckStrides(tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "MaxPoolingWithArgmax2D") {
return CheckPooling2DGpuDelegateCompatibility(op_sig);
}
if (op_sig.custom_name == "MaxUnpooling2D") {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
2,
1));
const TfLitePoolParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "Resampler") {
return CheckInputsOutputs(op_sig,
2,
1);
}
return absl::InvalidArgumentError(
absl::StrCat("Not supported custom op ", op_sig.custom_name));
}
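// NumPy-style broadcast check: align the two shapes from the trailing
// dimension; every aligned pair of dims must be equal or contain a 1, with
// missing leading dims treated as 1.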
bool CheckIsBroadcastable(const std::vector<int32_t>* longer_dims,
const std::vector<int32_t>* shorter_dims) {
int idx_1 = longer_dims->size() - 1;
int idx_2 = shorter_dims->size() - 1;
int max_idx = std::max(idx_1, idx_2);
int data_1 = 0;
int data_2 = 0;
for (int i = max_idx; i >= 0; --i) {
data_1 = idx_1 < 0 ? 1 : longer_dims->at(idx_1);
data_2 = idx_2 < 0 ? 1 : shorter_dims->at(idx_2);
if (data_1 != data_2 && data_1 != 1 && data_2 != 1) {
return false;
}
--idx_1;
--idx_2;
}
return true;
}
absl::Status CheckAddMulBroadcastCompatibility(
const OpSignatureTensorSpec& input0, const OpSignatureTensorSpec& input1,
GpuCompatibilityFlags flags) {
if (input0.dims.size() > 1 && input1.dims.size() > 1 &&
input0.dims.size() != input1.dims.size()) {
const std::vector<int32_t>*longer_dims, *shorter_dims;
if (input0.dims.size() >= input1.dims.size()) {
longer_dims = &input0.dims;
shorter_dims = &input1.dims;
} else {
longer_dims = &input1.dims;
shorter_dims = &input0.dims;
}
bool is_broadcastable = false;
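    // Enhanced mode applies the general broadcast rule above; standard mode
    // only accepts a small whitelist of 4D-vs-3D/2D shapes known to work in
    // the GPU delegate.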
if (flags == GpuCompatibilityFlags::kEnhancedBroadcast) {
is_broadcastable = CheckIsBroadcastable(longer_dims, shorter_dims);
} else {
if (longer_dims->size() == 4 && shorter_dims->size() == 3 &&
longer_dims->at(0) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == 1 && shorter_dims->at(0) == 1 &&
shorter_dims->at(1) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == shorter_dims->at(0) &&
longer_dims->at(3) == shorter_dims->at(1)) {
is_broadcastable = true;
}
}
if (!is_broadcastable) {
return absl::UnimplementedError(
absl::StrCat("Doesn't support broadcasting - input0: [",
absl::StrJoin(input0.dims, ","), "], input1: [",
absl::StrJoin(input1.dims, ","), "]"));
}
}
return absl::OkStatus();
}
}
absl::Status CheckGpuDelegateCompatibility(const OpSignature& op_sig,
GpuCompatibilityFlags flags) {
TfLiteBuiltinOperator opcode = static_cast<TfLiteBuiltinOperator>(op_sig.op);
switch (opcode) {
case kTfLiteBuiltinAdd: {
if (op_sig.inputs.size() != 2) {
return absl::UnimplementedError("ADD requires two input tensors.");
}
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
auto broadcastable =
CheckAddMulBroadcastCompatibility(input0, input1, flags);
if (!broadcastable.ok()) {
return broadcastable;
}
const TfLiteAddParams* tf_options;
return RetrieveBuiltinData(op_sig, &tf_options);
}
case kTfLiteBuiltinAddN: {
return op_sig.inputs.size() == 2
? absl::OkStatus()
: absl::UnimplementedError("ADD_N only supports 2 inputs.");
}
case kTfLiteBuiltinAveragePool2d:
return CheckPooling2DGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinBatchMatmul: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (!(num_inputs == 2 && num_outputs == 1)) {
return absl::InternalError(
absl::StrCat("Expected 2 inputs and 1 output, got: ", num_inputs,
" inputs and ", num_outputs, " outputs"));
}
return absl::OkStatus();
}
case kTfLiteBuiltinCast:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
if (op_sig.inputs.at(0).type == kTfLiteBool &&
(op_sig.outputs.at(0).type == kTfLiteFloat16 ||
op_sig.outputs.at(0).type == kTfLiteFloat32)) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat16 ||
op_sig.inputs.at(0).type == kTfLiteFloat32) &&
op_sig.outputs.at(0).type == kTfLiteBool) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat32 ||
op_sig.inputs.at(0).type == kTfLiteInt32) &&
(op_sig.outputs.at(0).type == kTfLiteFloat32 ||
op_sig.outputs.at(0).type == kTfLiteInt32)) {
return absl::OkStatus();
} else {
return absl::UnimplementedError(absl::StrCat(
"Not supported Cast case. Input type: ",
TfLiteTypeGetName(op_sig.inputs.at(0).type), " and output type: ",
TfLiteTypeGetName(op_sig.outputs.at(0).type)));
}
case kTfLiteBuiltinConcatenation: {
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return absl::OkStatus();
}
case kTfLiteBuiltinConv2d: {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor,
tf_options->dilation_width_factor));
return IsActivationSupported(tf_options->activation);
}
case kTfLiteBuiltinCumsum:
return CheckCumsumGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDensify:
return CheckInputsOutputs(op_sig, 0,
1);
case kTfLiteBuiltinDepthwiseConv2d:
return CheckDepthwiseConvGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDepthToSpace: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteDepthToSpaceParams* d2s_params;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &d2s_params));
if (d2s_params->block_size == 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size = 1 is a no-op.");
}
if (d2s_params->block_size < 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size must be > 1.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinDequantize: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (num_inputs != 1 || num_outputs != 1) {
        return absl::InternalError(
            absl::StrCat("Expected 1 input & output each from Dequantize, got: ",
                         num_inputs, ", ", num_outputs));
}
if (op_sig.inputs[0].type == kTfLiteInt16) {
return absl::UnimplementedError("Unsupported dequantization type.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinDynamicUpdateSlice: {
if (op_sig.inputs.size() != 3) {
return absl::UnimplementedError(
"DynamicUpdateSlice requires 3 inputs.");
}
OpSignatureTensorSpec operand = op_sig.inputs[0];
OpSignatureTensorSpec update_slice = op_sig.inputs[1];
OpSignatureTensorSpec start_indices = op_sig.inputs[2];
if (operand.dims.size() == 4 && operand.dims[0] != 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 4D operand with batch size 1.");
}
if (start_indices.dims.size() > 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 1D start_indices.");
}
if (operand.type != update_slice.type) {
return absl::InternalError(
absl::StrCat("Array to update and updated slice must have the same "
"data type, but got: array to update: ",
operand.type, ", updated slice: ", update_slice.type));
}
if (start_indices.dims.size() != 1) {
return absl::InternalError(
absl::StrCat("Start indices must have be 1D, but got: ",
start_indices.dims.size()));
}
if (start_indices.type != kTfLiteInt32) {
return absl::InvalidArgumentError(
"start_indices must be of type int32.");
}
if (update_slice.dims.size() != operand.dims.size()) {
return absl::InternalError(absl::StrCat(
"Operand and update must have the same number of "
"dimensions, but got: operand: ",
operand.dims.size(), ", update: ", update_slice.dims.size()));
}
return absl::OkStatus();
}
case kTfLiteBuiltinFullyConnected: {
const TfLiteFullyConnectedParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->weights_format !=
kTfLiteFullyConnectedWeightsFormatDefault) {
return absl::UnimplementedError(
absl::StrCat("Unsupported FullyConnected weights format: ",
tf_options->weights_format));
}
if (GetNumberOfRuntimeInputs(op_sig) > 2) {
return absl::UnimplementedError(
"FullyConnected doesn't support more than 2 runtime inputs.");
}
if (op_sig.inputs[0].is_const) {
return absl::UnimplementedError(
"FullyConnected doesn't support constant input.");
}
if (tf_options->keep_num_dims == true) {
const auto& input = op_sig.inputs.at(0);
const auto& output = op_sig.outputs.at(0);
if (input.dims.size() != output.dims.size()) {
return absl::UnimplementedError(
"Input and output dimensions different and FullyConnected "
"doesn't "
"support keep_num_dims.");
}
}
return absl::OkStatus();
}
case kTfLiteBuiltinGather:
if (!CheckInputsConstsOutputs(op_sig, 2,
0,
1)
.ok() &&
!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"Op can only handle 1 or 2 operand(s).");
}
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return absl::UnimplementedError("Does not accept INT32 input.\n");
}
if (op_sig.inputs[1].dims.size() != 1) {
return absl::UnimplementedError("Only support 1D indices\n");
}
return op_sig.inputs.at(1).type == kTfLiteInt32
? absl::OkStatus()
: absl::UnimplementedError("Only accept INT32 indices\n");
case kTfLiteBuiltinHardSwish:
return CheckInputsOutputs(op_sig, 1,
1);
case kTfLiteBuiltinLstm: {
const TfLiteLSTMParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
switch (tf_options->kernel_type) {
case kTfLiteLSTMFullKernel: {
const int inputs = op_sig.inputs.size();
if (inputs != 20 && inputs != 24) {
return absl::InternalError(
absl::StrCat("Expected 20 or 24 input tensors, but node has ",
inputs, " input(s)."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor, but node has ",
runtime_outputs, " output(s)."));
}
if (tf_options->activation != kTfLiteActSigmoid &&
tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(absl::StrCat(
"Only sigmoid or tanh activation is supported, but node has ",
tf_options->activation));
}
return absl::OkStatus();
}
case kTfLiteLSTMBasicKernel:
RETURN_IF_ERROR(
CheckInputsConstsOutputs(op_sig, 3,
2,
4));
if (tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(
absl::StrCat("Only TANH activation is supported. but node has ",
tf_options->activation));
} | #include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
absl::Status CheckGpuDelegateCompatibility(const tflite::Model* model) {
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
const OperatorCode* op_code =
model->operator_codes()->Get(op->opcode_index());
auto status = CheckGpuDelegateCompatibility(op_code, op, subgraph, model);
if (!status.ok()) {
return status;
}
}
}
return absl::OkStatus();
}
}
TEST(CheckGpuDelegateCompatibility, Conv2DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_TRUE(CheckGpuDelegateCompatibility(model->GetModel()).ok());
}
TEST(CheckGpuDelegateCompatibility, Conv3DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported op CONV_3D");
}
TEST(CheckGpuDelegateCompatibility, FlexModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/multi_add_flex.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported custom op FlexAddV2");
}
TEST(CheckGpuDelegateCompatibility, FCConstInput) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_FULLY_CONNECTED;
auto params = std::make_unique<TfLiteFullyConnectedParams>();
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(1);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].is_const = true;
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"FullyConnected doesn't support constant input.");
}
TEST(CheckGpuDelegateCompatibility, Add1Dto3DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {2};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto3DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 100, 256};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {100, 256};
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [1,100,256], input1: "
"[100,256]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1, 2};
EXPECT_EQ(
CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [4,1,1,2], input1: [1,1,2]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 128, 513, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {128, 513, 3};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 512, 512, 1};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess2) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 384, 384, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess3) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 4, 4, 10};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 10};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
} |
729 | cpp | tensorflow/tensorflow | mini_benchmark | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_MINI_BENCHMARK_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_MINI_BENCHMARK_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/synchronization/mutex.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
namespace acceleration {
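// Interface for the on-device mini-benchmark. Implementations run the
// configured acceleration tests asynchronously (TriggerMiniBenchmark), report
// progress through NumRemainingAccelerationTests(), expose loggable events via
// MarkAndGetEventsToLog(), and return the winning configuration from
// GetBestAcceleration().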
class MiniBenchmark {
public:
virtual ComputeSettingsT GetBestAcceleration() = 0;
virtual void TriggerMiniBenchmark() = 0;
virtual void SetEventTimeoutForTesting(int64_t timeout_us) = 0;
virtual std::vector<MiniBenchmarkEventT> MarkAndGetEventsToLog() = 0;
virtual int NumRemainingAccelerationTests() = 0;
MiniBenchmark() {}
virtual ~MiniBenchmark() {}
MiniBenchmark(MiniBenchmark&) = delete;
MiniBenchmark& operator=(const MiniBenchmark&) = delete;
MiniBenchmark(MiniBenchmark&&) = delete;
MiniBenchmark& operator=(const MiniBenchmark&&) = delete;
};
std::unique_ptr<MiniBenchmark> CreateMiniBenchmark(
const MinibenchmarkSettings& settings, const std::string& model_namespace,
const std::string& model_id);
class MinibenchmarkImplementationRegistry {
public:
using CreatorFunction = std::function<std::unique_ptr<MiniBenchmark>(
const MinibenchmarkSettings& ,
const std::string& , const std::string& )>;
static std::unique_ptr<MiniBenchmark> CreateByName(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id);
struct Register {
Register(const std::string& name, CreatorFunction creator_function);
};
private:
void RegisterImpl(const std::string& name, CreatorFunction creator_function);
std::unique_ptr<MiniBenchmark> CreateImpl(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id);
static MinibenchmarkImplementationRegistry* GetSingleton();
absl::Mutex mutex_;
std::unordered_map<std::string, CreatorFunction> factories_
ABSL_GUARDED_BY(mutex_);
};
}
}
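// Registers a factory for a named MiniBenchmark implementation at static
// initialization time. A purely illustrative (hypothetical) use inside the
// tflite::acceleration namespace of an implementation file might look like:
//   std::unique_ptr<MiniBenchmark> CreateImplMiniBenchmark(
//       const MinibenchmarkSettings& settings, const std::string& ns,
//       const std::string& id);
//   TFLITE_REGISTER_MINI_BENCHMARK_FACTORY_FUNCTION(Impl,
//                                                   CreateImplMiniBenchmark);
// CreateMiniBenchmark() looks the registered factory up under the name "Impl".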
#define TFLITE_REGISTER_MINI_BENCHMARK_FACTORY_FUNCTION(name, f) \
static auto* g_tflite_mini_benchmark_##name##_ = \
new MinibenchmarkImplementationRegistry::Register(#name, f);
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "flatbuffers/flatbuffers.h"
namespace tflite {
namespace acceleration {
namespace {
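// Fallback returned by CreateMiniBenchmark() when no real implementation has
// been linked in: it reports no events, -1 remaining acceleration tests, and
// default (un-accelerated) compute settings.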
class NoopMiniBenchmark : public MiniBenchmark {
public:
ComputeSettingsT GetBestAcceleration() override { return ComputeSettingsT(); }
void TriggerMiniBenchmark() override {}
void SetEventTimeoutForTesting(int64_t) override {}
std::vector<MiniBenchmarkEventT> MarkAndGetEventsToLog() override {
return {};
}
int NumRemainingAccelerationTests() override { return -1; }
};
}
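// Prefers the implementation registered under the name "Impl"; falls back to
// the no-op benchmark when none is linked into the binary.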
std::unique_ptr<MiniBenchmark> CreateMiniBenchmark(
const MinibenchmarkSettings& settings, const std::string& model_namespace,
const std::string& model_id) {
absl::StatusOr<std::unique_ptr<MiniBenchmark>> s_or_mb =
MinibenchmarkImplementationRegistry::CreateByName(
"Impl", settings, model_namespace, model_id);
if (!s_or_mb.ok()) {
return std::unique_ptr<MiniBenchmark>(new NoopMiniBenchmark());
} else {
return std::move(*s_or_mb);
}
}
void MinibenchmarkImplementationRegistry::RegisterImpl(
const std::string& name, CreatorFunction creator_function) {
absl::MutexLock lock(&mutex_);
factories_[name] = creator_function;
}
std::unique_ptr<MiniBenchmark> MinibenchmarkImplementationRegistry::CreateImpl(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
absl::MutexLock lock(&mutex_);
auto it = factories_.find(name);
return (it != factories_.end())
? it->second(settings, model_namespace, model_id)
: nullptr;
}
MinibenchmarkImplementationRegistry*
MinibenchmarkImplementationRegistry::GetSingleton() {
static auto* instance = new MinibenchmarkImplementationRegistry();
return instance;
}
std::unique_ptr<MiniBenchmark>
MinibenchmarkImplementationRegistry::CreateByName(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
return instance->CreateImpl(name, settings, model_namespace, model_id);
}
MinibenchmarkImplementationRegistry::Register::Register(
const std::string& name, CreatorFunction creator_function) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
instance->RegisterImpl(name, creator_function);
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <unistd.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_float_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BasicMiniBenchmarkTest, EmptySettings) {
proto::MinibenchmarkSettings settings_proto;
flatbuffers::FlatBufferBuilder empty_settings_buffer_;
const MinibenchmarkSettings* empty_settings =
ConvertFromProto(settings_proto, &empty_settings_buffer_);
std::unique_ptr<MiniBenchmark> mb(
CreateMiniBenchmark(*empty_settings, "ns", "id"));
mb->TriggerMiniBenchmark();
const ComputeSettingsT acceleration = mb->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_TRUE(mb->MarkAndGetEventsToLog().empty());
EXPECT_EQ(-1, mb->NumRemainingAccelerationTests());
}
class MiniBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (should_perform_test_) {
mobilenet_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_float_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_float_validation_model,
g_tflite_acceleration_embedded_mobilenet_float_validation_model_len);
}
}
void SetupBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
proto::MinibenchmarkSettings settings;
proto::TFLiteSettings* tflite_settings = settings.add_settings_to_test();
tflite_settings->set_delegate(delegate);
if ((delegate == proto::Delegate::NNAPI) && nnapi_sl) {
std::cerr << "Using NNAPI SL\n";
tflite_settings->mutable_nnapi_settings()->set_support_library_handle(
reinterpret_cast<int64_t>(nnapi_sl->getFL5()));
}
proto::ModelFile* file = settings.mutable_model_file();
file->set_filename(model_path);
proto::BenchmarkStoragePaths* paths = settings.mutable_storage_paths();
paths->set_storage_file_path(::testing::TempDir() + "/storage.fb");
if (reset_storage) {
(void)unlink(paths->storage_file_path().c_str());
(void)unlink((paths->storage_file_path() + ".extra.fb").c_str());
}
paths->set_data_directory_path(::testing::TempDir());
if (delegate != proto::Delegate::NONE) {
proto::TFLiteSettings* cpu_tflite_settings =
settings.add_settings_to_test();
cpu_tflite_settings->set_disable_default_delegates(false);
}
settings_ = ConvertFromProto(settings, &settings_buffer_);
mb_ = CreateMiniBenchmark(*settings_, ns_, model_id_);
}
void TriggerBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
SetupBenchmark(delegate, model_path, reset_storage, nnapi_sl);
mb_->TriggerMiniBenchmark();
}
void WaitForValidationCompletion(
absl::Duration timeout = absl::Seconds(300)) {
absl::Time deadline = absl::Now() + timeout;
while (absl::Now() < deadline) {
if (mb_->NumRemainingAccelerationTests() == 0) return;
absl::SleepFor(absl::Milliseconds(200));
}
    ASSERT_EQ(0, mb_->NumRemainingAccelerationTests());
}
const std::string ns_ = "org.tensorflow.lite.mini_benchmark.test";
const std::string model_id_ = "test_minibenchmark_model";
bool should_perform_test_ = true;
std::unique_ptr<MiniBenchmark> mb_;
std::string mobilenet_model_path_;
flatbuffers::FlatBufferBuilder settings_buffer_;
const MinibenchmarkSettings* settings_;
};
TEST_F(MiniBenchmarkTest, OnlyCPUSettings) {
if (!should_perform_test_) return;
SetupBenchmark(proto::Delegate::NONE, mobilenet_model_path_);
EXPECT_EQ(-1, mb_->NumRemainingAccelerationTests());
ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(1, mb_->NumRemainingAccelerationTests());
mb_->TriggerMiniBenchmark();
WaitForValidationCompletion();
acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
}
TEST_F(MiniBenchmarkTest, RunSuccessfully) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
const ComputeSettingsT acceleration1 = mb_->GetBestAcceleration();
const ComputeSettingsT acceleration2 = mb_->GetBestAcceleration();
EXPECT_EQ(acceleration1, acceleration2);
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration1.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration1.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration1.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration1.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
const auto& decision = events.front().best_acceleration_decision;
EXPECT_NE(nullptr, decision);
#ifndef ADDRESS_SANITIZER
EXPECT_EQ(tflite::Delegate_XNNPACK,
decision->min_latency_event->tflite_settings->delegate);
#endif
}
TEST_F(MiniBenchmarkTest, BestAccelerationEventIsMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
mb_->GetBestAcceleration();
  TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
                   /*reset_storage=*/false);
EXPECT_EQ(0, mb_->NumRemainingAccelerationTests());
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
}
TEST_F(MiniBenchmarkTest,
BestAccelerationEventIsNotReMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->GetBestAcceleration();
mb_->MarkAndGetEventsToLog();
  TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
                   /*reset_storage=*/false);
mb_->GetBestAcceleration();
EXPECT_TRUE(mb_->MarkAndGetEventsToLog().empty());
}
TEST_F(MiniBenchmarkTest, DelegatePluginNotSupported) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::HEXAGON, mobilenet_model_path_);
WaitForValidationCompletion();
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
const auto events = mb_->MarkAndGetEventsToLog();
bool is_found = false;
for (const auto& event : events) {
const auto& t = event.benchmark_event;
if (t == nullptr) continue;
if (t->event_type == tflite::BenchmarkEventType_ERROR &&
t->error->mini_benchmark_error_code ==
tflite::acceleration::kMinibenchmarkDelegateNotSupported) {
is_found = true;
break;
}
}
EXPECT_TRUE(is_found);
}
#ifdef __ANDROID__
TEST_F(MiniBenchmarkTest, UseNnApiSl) {
if (!should_perform_test_) return;
std::string nnapi_sl_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_path_);
ASSERT_TRUE(nnapi_sl);
  TriggerBenchmark(proto::Delegate::NNAPI, mobilenet_model_path_,
                   /*reset_storage=*/true, nnapi_sl.get());
WaitForValidationCompletion();
EXPECT_TRUE(tflite::acceleration::WasNnApiSlInvoked());
}
#endif
}
}
} |
730 | cpp | tensorflow/tensorflow | validator_runner_impl | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_IMPL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_IMPL_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
namespace tflite {
namespace acceleration {
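// Runs validation of the given model with each candidate TFLiteSettings
// through the validation entrypoint (via ProcessRunner), records
// BenchmarkEvents to flatbuffer-backed storage, and exposes the completed and
// successful results. NNAPI support-library and GPU plugin handles are
// resolved to shared-object paths so the validation run can reload them.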
class ValidatorRunnerImpl {
public:
ValidatorRunnerImpl(
const std::string& fd_or_model_path, const std::string& storage_path,
const std::string& data_directory_path, int timeout_ms,
std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder,
ErrorReporter* error_reporter, const NnApiSLDriverImplFL5* nnapi_sl,
const TfLiteDelegatePlugin* gpu_plugin_handle,
const std::string& validation_entrypoint_name,
AbstractBenchmarkResultEvaluator* benchmark_evaluator)
: fd_or_model_path_(fd_or_model_path),
storage_path_(storage_path),
data_directory_path_(data_directory_path),
timeout_ms_(timeout_ms),
custom_validation_embedder_(std::move(custom_validation_embedder)),
error_reporter_(error_reporter),
storage_(storage_path_, error_reporter_),
nnapi_helper_(nnapi_sl),
gpu_helper_(gpu_plugin_handle),
validation_entrypoint_helper_(validation_entrypoint_name,
error_reporter_),
benchmark_evaluator_(benchmark_evaluator) {}
MinibenchmarkStatus Init();
void TriggerValidationAsync(
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings,
absl::string_view storage_path);
std::vector<const BenchmarkEvent*> GetSuccessfulResultsFromStorage();
int GetNumCompletedResults();
std::vector<flatbuffers::FlatBufferBuilder> GetCompletedResults();
private:
class NnapiHelper {
public:
explicit NnapiHelper(const NnApiSLDriverImplFL5* nnapi_sl)
: nnapi_sl_(nnapi_sl) {}
MinibenchmarkStatus Load();
const std::string& nnapi_sl_path() const { return nnapi_sl_path_; }
private:
const NnApiSLDriverImplFL5* nnapi_sl_;
std::string nnapi_sl_path_;
};
class GpuHelper {
public:
explicit GpuHelper(const TfLiteDelegatePlugin* gpu_plugin_handle)
: gpu_plugin_handle_(gpu_plugin_handle) {}
MinibenchmarkStatus Load();
const std::string& gpu_so_path() const { return gpu_so_path_; }
private:
const TfLiteDelegatePlugin* gpu_plugin_handle_;
std::string gpu_so_path_;
};
class ValidationEntrypointHelper {
public:
using EntrypointFunc = int(int argc, char** argv);
explicit ValidationEntrypointHelper(
const std::string& validation_entrypoint_name,
ErrorReporter* error_reporter)
: validation_entrypoint_name_(validation_entrypoint_name),
error_reporter_(error_reporter) {}
MinibenchmarkStatus Validate();
EntrypointFunc* LoadEntrypoint();
const std::string& name() { return validation_entrypoint_name_; }
private:
std::string validation_entrypoint_name_;
ErrorReporter* error_reporter_;
};
std::string fd_or_model_path_;
std::string storage_path_;
std::string data_directory_path_;
int timeout_ms_ = 0;
std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder_;
flatbuffers::FlatBufferBuilder model_with_custom_input_;
std::unique_ptr<Allocation> model_allocation_ = nullptr;
ErrorReporter* error_reporter_;
FlatbufferStorage<BenchmarkEvent> storage_;
NnapiHelper nnapi_helper_;
GpuHelper gpu_helper_;
ValidationEntrypointHelper validation_entrypoint_helper_;
AbstractBenchmarkResultEvaluator* benchmark_evaluator_ = nullptr;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#ifndef _WIN32
#include <dlfcn.h>
#include <unistd.h>
#endif
#include <cstring>
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
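// Makes a self-contained copy of the model allocation (if any) so the
// detached validation thread does not depend on the lifetime of the caller's
// buffer. Returns {nullptr, {}} when there is nothing to copy.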
std::pair<std::unique_ptr<Allocation>, std::vector<uint8_t>> CopyModel(
const Allocation* input, ErrorReporter* error_reporter) {
std::vector<uint8_t> copy;
if (!input) {
return {nullptr, copy};
}
copy.resize(input->bytes());
memcpy(copy.data(), input->base(), input->bytes());
return {std::make_unique<MemoryAllocation>(copy.data(), copy.size(),
error_reporter),
std::move(copy)};
}
class FdHolder {
public:
explicit FdHolder(int fd) : fd_(fd) {}
FdHolder(FdHolder&& other) = default;
FdHolder& operator=(FdHolder&& other) = default;
~FdHolder() {
if (fd_ > 0) {
close(fd_);
}
}
private:
int fd_;
};
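// For "fd:<number>:..." model paths, dup()s the file descriptor so it stays
// valid independently of the caller, rewrites the path to reference the new
// descriptor, and returns a holder that closes it when validation finishes.
// Returns nullptr (leaving the path untouched) for regular file paths or when
// the descriptor cannot be parsed or duplicated.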
std::unique_ptr<FdHolder> UpdateModelPathIfUsingFd(std::string& model_path) {
if (!absl::StartsWith(model_path, "fd:")) {
return nullptr;
}
std::vector<std::string> parts = absl::StrSplit(model_path, ':');
int model_fd;
if (!absl::SimpleAtoi(parts[1], &model_fd)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to parse file descriptor %s from model_path %s",
parts[1].c_str(), model_path.c_str());
return nullptr;
}
int new_fd = dup(model_fd);
if (new_fd < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Failed to dup() file descriptor. Original fd: %d errno: %d", model_fd,
errno);
return nullptr;
}
parts[1] = std::to_string(new_fd);
model_path = absl::StrJoin(parts, ":");
return std::make_unique<FdHolder>(new_fd);
}
}
MinibenchmarkStatus ValidatorRunnerImpl::Init() {
if (storage_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "storage_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (data_directory_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "data_directory_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (benchmark_evaluator_ == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter_, "benchmark_evaluator is null.");
return kMinibenchmarkPreconditionNotMet;
}
MinibenchmarkStatus status = storage_.Read();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed.");
return status;
}
std::unique_ptr<tools::ModelLoader> model_loader =
tools::CreateModelLoaderFromPath(fd_or_model_path_);
if (!model_loader) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to parse model path.");
return kMinibenchmarkPreconditionNotMet;
}
if (!model_loader->Init() || !model_loader->GetModel()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load model.");
return kMinibenchmarkModelInitFailed;
}
if (custom_validation_embedder_) {
status = custom_validation_embedder_->BuildModel(
*model_loader->GetModel()->GetModel(), model_with_custom_input_);
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to embed golden input to model: %d",
static_cast<int>(status));
return status;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
model_with_custom_input_.GetBufferPointer(),
model_with_custom_input_.GetSize(), error_reporter_);
} else if (model_loader->type() ==
tools::ModelLoader::Type::kBufferModelLoader) {
const Allocation* alloc = model_loader->GetModel()->allocation();
if (!alloc || !alloc->valid() || !alloc->base() || alloc->bytes() <= 0) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Internal error: BufferModelLoader doesn't have a "
"valid allocation.");
return kMinibenchmarkPreconditionNotMet;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
alloc->base(), alloc->bytes(), error_reporter_);
}
status = nnapi_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load NNAPI SL: %d",
static_cast<int>(status));
return status;
}
status = gpu_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load GPU Module: %d",
static_cast<int>(status));
return status;
}
status = validation_entrypoint_helper_.Validate();
if (status != kMinibenchmarkSuccess) {
return status;
}
ProcessRunner check_runner(data_directory_path_,
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint_helper_.LoadEntrypoint(),
timeout_ms_, error_reporter_);
status = check_runner.Init();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Runner::Init returned %d",
static_cast<int>(status));
return status;
}
return kMinibenchmarkSuccess;
}
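// Spawns a detached thread that takes a file lock on
// "<storage_path>.parent_lock" and, for each candidate TFLiteSettings, appends
// a START event to storage, runs the validation entrypoint through
// ProcessRunner, and appends an ERROR event if initialization or the run
// itself fails.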
void ValidatorRunnerImpl::TriggerValidationAsync(
std::vector<FlatBufferBuilder> tflite_settings,
absl::string_view storage_path) {
if (tflite_settings.empty()) {
return;
}
storage_ = FlatbufferStorage<BenchmarkEvent>(storage_path, error_reporter_);
std::thread detached_thread(
[original_model_path = fd_or_model_path_,
storage_path = std::string(storage_path),
data_directory_path = data_directory_path_,
tflite_settings = std::move(tflite_settings),
validation_entrypoint_name =
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint = validation_entrypoint_helper_.LoadEntrypoint(),
nnapi_sl_path = nnapi_helper_.nnapi_sl_path(),
gpu_so_path = gpu_helper_.gpu_so_path(),
allocation_and_model =
CopyModel(model_allocation_.get(), error_reporter_),
timeout_ms = timeout_ms_]() {
FileLock lock(absl::StrCat(storage_path, ".parent_lock"));
if (!lock.TryLock()) {
return;
}
std::string model_path = original_model_path;
std::unique_ptr<FdHolder> fd_holder =
UpdateModelPathIfUsingFd(model_path);
for (auto& one_setting : tflite_settings) {
FlatbufferStorage<BenchmarkEvent> storage(storage_path);
TFLiteSettingsT tflite_settings_obj;
flatbuffers::GetRoot<TFLiteSettings>(one_setting.GetBufferPointer())
->UnPackTo(&tflite_settings_obj);
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Run validation with entry point '%s' %s",
validation_entrypoint_name, storage_path.c_str());
ProcessRunner runner(data_directory_path, validation_entrypoint_name,
validation_entrypoint, timeout_ms);
int exitcode = 0;
int signal = 0;
MinibenchmarkStatus status = runner.Init();
if (status == kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
status = storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_START, 0, 0,
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
if (status != kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
exitcode, signal, {},
status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
continue;
}
std::vector<std::string> args;
if (!allocation_and_model.first) {
args.push_back(model_path);
}
args.push_back(storage_path);
args.push_back(data_directory_path);
if (tflite_settings_obj.delegate == tflite::Delegate_NNAPI &&
!nnapi_sl_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using NNAPI support library at path '%s'",
nnapi_sl_path.c_str());
args.push_back(nnapi_sl_path);
} else if (tflite_settings_obj.delegate == tflite::Delegate_GPU &&
!gpu_so_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using GPU Delegate Module at path '%s'",
gpu_so_path.c_str());
args.push_back(gpu_so_path);
}
std::string output;
status = runner.Run(allocation_and_model.first.get(), args, &output,
&exitcode, &signal);
if (status != kMinibenchmarkSuccess) {
std::cout << "Run() returned " << status << std::endl;
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, exitcode,
signal, {}, status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
}
});
detached_thread.detach();
}
std::vector<const BenchmarkEvent*>
ValidatorRunnerImpl::GetSuccessfulResultsFromStorage() {
std::vector<const BenchmarkEvent*> results;
storage_.Read();
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Benchmark event(%d).",
event->event_type());
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
results.push_back(event);
} else if (event->event_type() == BenchmarkEventType_ERROR) {
TFLITE_LOG(
TFLITE_LOG_WARNING,
"Benchmark event failed with error code (%d), signal (%d), exit code "
"(%d), stage (%d), mini benchmark error code (%d).\n",
event->error()->error_code(), event->error()->signal(),
event->error()->exit_code(), event->error()->stage(),
event->error()->mini_benchmark_error_code());
}
}
return results;
}
std::vector<FlatBufferBuilder> ValidatorRunnerImpl::GetCompletedResults() {
storage_.Read();
std::vector<FlatBufferBuilder> results;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() != BenchmarkEventType_ERROR &&
event->event_type() != BenchmarkEventType_END) {
continue;
}
BenchmarkEventT event_obj;
event->UnPackTo(&event_obj);
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
event_obj.result->ok = true;
}
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(fbb, &event_obj));
results.emplace_back(std::move(fbb));
}
return results;
}
int ValidatorRunnerImpl::GetNumCompletedResults() {
storage_.Read();
int num_results = 0;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() == BenchmarkEventType_ERROR ||
(event->event_type() == BenchmarkEventType_END && event->result())) {
num_results++;
}
}
return num_results;
}
MinibenchmarkStatus
ValidatorRunnerImpl::ValidationEntrypointHelper::Validate() {
#ifndef _WIN32
if (!LoadEntrypoint()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load symbol '%s': '%s'",
validation_entrypoint_name_.c_str(), dlerror());
return kMinibenchmarkValidationEntrypointSymbolNotFound;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
ValidatorRunnerImpl::ValidationEntrypointHelper::EntrypointFunc*
ValidatorRunnerImpl::ValidationEntrypointHelper::LoadEntrypoint() {
#ifndef _WIN32
return reinterpret_cast<int (*)(int, char**)>(
dlsym(RTLD_DEFAULT, validation_entrypoint_name_.c_str()));
#endif
return nullptr;
}
MinibenchmarkStatus ValidatorRunnerImpl::NnapiHelper::Load() {
if (nnapi_sl_) {
#ifndef _WIN32
Dl_info dl_info;
if (!nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
int status = dladdr(reinterpret_cast<void*>(
nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel),
&dl_info);
if (status == 0 || !dl_info.dli_fname) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
nnapi_sl_path_ = dl_info.dli_fname;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus ValidatorRunnerImpl::GpuHelper::Load() {
  if (gpu_plugin_handle_) {
#ifndef _WIN32
    Dl_info dl_info;
    int status = dladdr(gpu_plugin_handle_, &dl_info);
    if (status == 0 || !dl_info.dli_fname) {
      return kMinibenchmarkCannotLoadGpuModule;
    }
    gpu_so_path_ = dl_info.dli_fname;
#else
    return kMinibenchmarkUnsupportedPlatform;
#endif
  }
  return kMinibenchmarkSuccess;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
class AlwaysTrueEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class ValidatorRunnerImplTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
nnapi_sl_dump_path_ = helper.DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
options_.data_directory_path = ::testing::TempDir();
options_.storage_path = ::testing::TempDir() + "/storage_path.fb";
options_.validation_entrypoint_name =
"Java_org_tensorflow_lite_acceleration_validation_entrypoint";
options_.error_reporter = tflite::DefaultErrorReporter();
options_.benchmark_result_evaluator =
EmbeddedResultEvaluator::GetInstance();
options_.per_test_timeout_ms = 0;
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path_.empty());
}
void TearDown() override {
if (should_perform_test_) {
ASSERT_EQ(unlink(options_.storage_path.c_str()), 0);
}
}
ValidatorRunnerImpl CreateValidator() {
return ValidatorRunnerImpl(
CreateModelLoaderPath(options_), options_.storage_path,
options_.data_directory_path, options_.per_test_timeout_ms,
std::move(custom_validation_embedder_), options_.error_reporter,
options_.nnapi_sl, options_.gpu_plugin_handle,
options_.validation_entrypoint_name,
options_.benchmark_result_evaluator);
}
bool should_perform_test_;
ValidatorRunnerOptions options_{};
std::string plain_model_path_;
std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder_ =
nullptr;
std::string nnapi_sl_dump_path_;
};
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithNnApiSlAndEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> fake_nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_dump_path_);
ASSERT_THAT(fake_nnapi_sl.get(), ::testing::NotNull());
options_.nnapi_sl = fake_nnapi_sl->getFL5();
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(
CreateTFLiteSettings(tflite_settings[0], Delegate_NNAPI,
CreateNNAPISettings(tflite_settings[0])));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<const BenchmarkEvent*> results =
validator.GetSuccessfulResultsFromStorage();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
ASSERT_THAT(result, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_THAT(result->result()->actual_output(),
testing::Pointee(testing::SizeIs(0)));
}
EXPECT_TRUE(WasNnApiSlInvoked());
}
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithBufferModelAndCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_model;
options_.model_size = g_tflite_acceleration_embedded_mobilenet_model_len;
options_.model_path.clear();
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsOkWithCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsNotOkIfCustomValidationFailed) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_FALSE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest, FailIfItCannotFindNnApiSlPath) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
NnApiSLDriverImplFL5 wrong_handle_nnapi_sl{};
options_.nnapi_sl = &wrong_handle_nnapi_sl;
ValidatorRunnerImpl validator = CreateValidator();
EXPECT_EQ(validator.Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
TEST_F(ValidatorRunnerImplTest, FailWithInvalidEntrypoint) {
options_.validation_entrypoint_name = "invalid_name()";
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationEntrypointSymbolNotFound);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotLoadModel) {
options_.model_path = "invalid/path";
EXPECT_EQ(CreateValidator().Init(), kMinibenchmarkModelInitFailed);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotEmbedInputData) {
options_.model_path = plain_model_path_;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
1, std::vector<std::vector<uint8_t>>(2));
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} |
731 | cpp | tensorflow/tensorflow | libjpeg_decoder | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBJPEG_DECODER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBJPEG_DECODER_H_
#include <memory.h>
#include <csetjmp>
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_common.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
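// Parses the decompress-struct size that libjpeg expects out of its
// "parameter struct mismatch" error message and stores it in expected_size.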
Status ExtractSizeFromErrorMessage(const std::string& error_message,
size_t& expected_size);
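// Decodes JPEG images through a dynamically resolved libjpeg, tolerating a
// jpeg_decompress_struct whose size differs from the one this binary was
// compiled against (for example when the runtime libjpeg does not match the
// headers used at build time).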
class LibjpegDecoder {
public:
static const size_t kMaxImageHeight;
static const size_t kMaxImageWidth;
static std::unique_ptr<LibjpegDecoder> Create(Status& status);
Status DecodeImage(const tflite::StringRef& encoded,
const JpegHeader& expected_image_dimensions,
unsigned char* decoded, const size_t& decoded_size) const;
private:
explicit LibjpegDecoder(LibCHandle libc_handle)
: libc_handle_(std::move(libc_handle)) {}
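  // Wraps a single jpeg_decompress_struct. Every libjpeg call is funneled
  // through setjmp/longjmp-based error handling (see ErrorExit) so that
  // libjpeg errors are reported as a Status instead of terminating the
  // process.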
class Impl {
public:
explicit Impl(size_t decompress_struct_size, const LibjpegHandle* handle);
~Impl() { jpeg_destroy_decompress(); }
Impl(const Impl&) = delete;
Impl& operator=(const Impl&) = delete;
Impl(Impl&& other) = delete;
Impl& operator=(Impl&& other) = delete;
TfLiteStatus jpeg_CreateDecompress(int version, size_t struct_size) {
safe_to_invoke_destroy_decompress_ = true;
return Run(&LibjpegHandle::jpeg_create_decompress_, version, struct_size);
}
TfLiteStatus jpeg_stdio_src(FILE* infile) {
return Run(&LibjpegHandle::jpeg_stdio_src_, infile);
}
TfLiteStatus jpeg_read_header(int& read_header_result,
boolean require_image) {
return RunAndSetResult(&LibjpegHandle::jpeg_read_header_,
&read_header_result, require_image);
}
TfLiteStatus jpeg_start_decompress(boolean& start_decompress_result) {
return RunAndSetResult(&LibjpegHandle::jpeg_start_decompress_,
&start_decompress_result);
}
TfLiteStatus jpeg_read_scanlines(unsigned int& read_scanlines_result,
JSAMPARRAY scanlines,
JDIMENSION max_lines) {
return RunAndSetResult(&LibjpegHandle::jpeg_read_scanlines_,
&read_scanlines_result, scanlines, max_lines);
}
TfLiteStatus jpeg_finish_decompress(boolean& finish_decompress_result) {
return RunAndSetResult(&LibjpegHandle::jpeg_finish_decompress_,
&finish_decompress_result);
}
TfLiteStatus jpeg_destroy_decompress() {
if (safe_to_invoke_destroy_decompress_) {
safe_to_invoke_destroy_decompress_ = false;
return Run(&LibjpegHandle::jpeg_destroy_decompress_);
}
return kTfLiteOk;
}
Status status() { return status_; }
private:
template <typename Fn, typename... Args>
TfLiteStatus Run(Fn f, Args... args) {
if (setjmp(env_)) return kTfLiteError;
(handle_->*f)(cinfo_.get(), args...);
return kTfLiteOk;
}
template <
typename Fn, typename... Args,
typename ResultType = typename std::result_of_t<Fn>,
typename = typename std::enable_if<!std::is_void<ResultType>::value> >
TfLiteStatus RunAndSetResult(Fn f, ResultType* result, Args... args) {
if (setjmp(env_)) return kTfLiteError;
*result = (handle_->*f)(cinfo_.get(), args...);
return kTfLiteOk;
}
size_t decompress_struct_size_;
const LibjpegHandle* handle_;
JpegDecompressBufferedStruct cinfo_;
struct jpeg_error_mgr jerr_;
jmp_buf env_;
static void ErrorExit(j_common_ptr cinfo);
bool safe_to_invoke_destroy_decompress_ = false;
Status status_;
};
size_t expected_size_for_decompress_struct_;
std::unique_ptr<LibjpegHandle> libjpeg_handle_;
LibCHandle libc_handle_;
};
}
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include <setjmp.h>
#include <algorithm>
#include <cctype>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
const size_t LibjpegDecoder::kMaxImageHeight = 10000;
const size_t LibjpegDecoder::kMaxImageWidth = 10000;
constexpr char kSizeMismatchError[] =
"JPEG parameter struct mismatch: library thinks size is ";
LibjpegDecoder::Impl::Impl(size_t decompress_struct_size,
const LibjpegHandle* handle)
: decompress_struct_size_(decompress_struct_size),
handle_(handle),
cinfo_(decompress_struct_size) {
cinfo_.get()->err = handle->jpeg_std_error_(&jerr_);
jerr_.error_exit = ErrorExit;
cinfo_.get()->client_data = this;
}
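// Installed as libjpeg's error_exit callback: captures the formatted error
// message into the owning Impl's status_ and longjmps back to the libjpeg
// call site, which then returns kTfLiteError.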
void LibjpegDecoder::Impl::ErrorExit(j_common_ptr cinfo) {
Impl* const impl = reinterpret_cast<Impl*>(cinfo->client_data);
char message[JMSG_LENGTH_MAX];
cinfo->err->format_message(cinfo, message);
impl->status_.code = kTfLiteError;
impl->status_.error_message = message;
longjmp(impl->env_, 1);
}
Status ExtractSizeFromErrorMessage(const std::string& error_message,
size_t& expected_size) {
Status status;
static const int kExpLengthStart = strlen(kSizeMismatchError);
int end = kExpLengthStart;
while (end < error_message.length() && std::isdigit(error_message[end])) {
end++;
}
if (end > kExpLengthStart) {
    expected_size = std::stoi(
        error_message.substr(kExpLengthStart, end - kExpLengthStart));
} else {
status.code = kTfLiteError;
status.error_message =
"Couldn't parse the size from message: \'" + error_message + "\'";
}
return status;
}
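// Probes the runtime libjpeg by creating a decompress struct with the
// compile-time size. If the library rejects it with a size-mismatch error,
// the expected size is parsed from that message and used for later decodes;
// any other failure aborts creation and returns nullptr.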
std::unique_ptr<LibjpegDecoder> LibjpegDecoder::Create(Status& status) {
std::unique_ptr<LibjpegDecoder> decoder(
new LibjpegDecoder(LibCHandle::Create(status)));
if (status.code != kTfLiteOk) {
return nullptr;
}
decoder->libjpeg_handle_ = LibjpegHandle::Create(status);
if (decoder->libjpeg_handle_ == nullptr) {
return nullptr;
}
Impl impl(sizeof(jpeg_decompress_struct), decoder->libjpeg_handle_.get());
impl.jpeg_CreateDecompress(LibjpegHandle::kLibjpegVersion,
sizeof(jpeg_decompress_struct));
status = impl.status();
if (status.code == kTfLiteOk) {
decoder->expected_size_for_decompress_struct_ =
sizeof(jpeg_decompress_struct);
return decoder;
}
if (!absl::StrContains(status.error_message, kSizeMismatchError)) {
return nullptr;
}
status = ExtractSizeFromErrorMessage(
status.error_message, decoder->expected_size_for_decompress_struct_);
if (status.code != kTfLiteOk) {
return nullptr;
}
return decoder;
}
namespace {
std::string JpegHeaderToString(const JpegHeader& header) {
return "(" + std::to_string(header.height) + ", " +
std::to_string(header.width) + ", " + std::to_string(header.channels) +
", " + std::to_string(header.bits_per_sample) + ")";
}
}
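// Validates the requested dimensions against both the hard limits and the
// actual JPEG header, then decodes scanline by scanline into `decoded`, which
// must be exactly width * height * channels bytes.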
Status LibjpegDecoder::DecodeImage(const tflite::StringRef& encoded,
const JpegHeader& expected_image_dimensions,
unsigned char* decoded,
const size_t& decoded_size) const {
if (expected_image_dimensions.bits_per_sample != 8) {
return {kTfLiteError, "Supporting only images with 8 bits per sample"};
}
if (expected_image_dimensions.channels != 1 &&
expected_image_dimensions.channels != 3) {
return {kTfLiteError, "Supporting only images with 1 or 3 channels"};
}
if (expected_image_dimensions.width > kMaxImageWidth ||
expected_image_dimensions.height > kMaxImageHeight) {
return {kTfLiteError, "Image is too big, dimensions (" +
std::to_string(expected_image_dimensions.width) +
"," +
                              std::to_string(expected_image_dimensions.height) +
") larger than the maximum allowed (" +
std::to_string(kMaxImageWidth) + ", " +
std::to_string(kMaxImageHeight) + ")"};
}
JpegHeader header;
Status read_header_status = ReadJpegHeader(encoded, &header);
if (read_header_status.code != kTfLiteOk) {
return read_header_status;
}
if (expected_image_dimensions.channels != header.channels ||
expected_image_dimensions.width != header.width ||
expected_image_dimensions.height != header.height ||
expected_image_dimensions.bits_per_sample != header.bits_per_sample) {
return {kTfLiteError, "Decoded image size " + JpegHeaderToString(header) +
" is different from provided image size " +
JpegHeaderToString(expected_image_dimensions)};
}
size_t header_image_size = static_cast<size_t>(header.width) *
static_cast<size_t>(header.height) *
static_cast<size_t>(header.channels);
if (header_image_size != decoded_size) {
return {kTfLiteError, "Size of buffer(" + std::to_string(decoded_size) +
") for storing decoded image must be equal to "
"the size of decoded image(" +
std::to_string(header_image_size) + ")."};
}
char* image_buffer = const_cast<char*>(encoded.str);
size_t image_size = encoded.len;
std::unique_ptr<FILE, std::function<void(FILE*)>> file(
libc_handle_.fmemopen(image_buffer, image_size, "r"),
[](FILE* f) { fclose(f); });
if (file == nullptr) {
return {kTfLiteError, "Fmemopen failed."};
}
Impl impl(expected_size_for_decompress_struct_, libjpeg_handle_.get());
if (impl.jpeg_CreateDecompress(LibjpegHandle::kLibjpegVersion,
expected_size_for_decompress_struct_)) {
return impl.status();
}
if (impl.jpeg_stdio_src(file.get())) {
return impl.status();
}
int read_header_result = 0;
if (impl.jpeg_read_header(read_header_result, true) != kTfLiteOk) {
return impl.status();
}
if (read_header_result != JPEG_HEADER_OK) {
return {kTfLiteError, "Failed call jpeg_read_header"};
}
boolean start_decompress_result = false;
if (impl.jpeg_start_decompress(start_decompress_result) != kTfLiteOk) {
return impl.status();
}
if (!start_decompress_result) {
return {kTfLiteError, "Failed call jpeg_start_decompress_"};
}
size_t height = header.height;
size_t row_stride = header.width * header.channels;
const size_t kMaxImageSize = JPEG_MAX_DIMENSION * 4;
std::vector<unsigned char> decode_buffer(kMaxImageSize);
unsigned char* buffer_array[1];
buffer_array[0] = decode_buffer.data();
size_t decoded_offset = 0;
while (height--) {
unsigned int num_of_scanlines_read = 0;
if (impl.jpeg_read_scanlines(num_of_scanlines_read, buffer_array, 1) !=
kTfLiteOk) {
return impl.status();
}
if (num_of_scanlines_read != 1) {
return {kTfLiteError, "Expected " + std::to_string(header.height) +
" lines but found only " +
std::to_string(header.height - height) +
" read scanlines is " +
std::to_string(num_of_scanlines_read)};
}
std::copy_n(buffer_array[0], row_stride, decoded + decoded_offset);
decoded_offset += row_stride;
}
boolean finish_decompress_result = false;
if (impl.jpeg_finish_decompress(finish_decompress_result) != kTfLiteOk) {
return impl.status();
}
if (!finish_decompress_result) {
return {kTfLiteError, "Failed call jpeg_finish_decompress_"};
}
if (impl.jpeg_destroy_decompress() != kTfLiteOk) {
return impl.status();
}
return impl.status();
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include <stddef.h>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_snow_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_test_card_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test_helper.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using testing::IsEmpty;
using testing::NotNull;
constexpr JpegHeader kExpectedImageDimensions{
.height = 300, .width = 250, .channels = 3};
constexpr int kDecodedSize = kExpectedImageDimensions.height *
kExpectedImageDimensions.width *
kExpectedImageDimensions.channels;
TEST(LibjpegDecoderTest, InitShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_EQ(status.code, kTfLiteOk);
EXPECT_THAT(status.error_message, IsEmpty());
}
TEST(LibjpegDecoderTest, DecodingChessboardShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
ASSERT_THAT(decoder, NotNull());
tflite::StringRef string_ref = {
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
kDecodedSize);
ASSERT_EQ(status.error_message, "");
ASSERT_EQ(status.code, kTfLiteOk);
std::vector<uint8_t> decoded_vec(decoded, decoded + kDecodedSize);
EXPECT_THAT(decoded_vec, HasChessboardPatternWithTolerance(12));
}
TEST(LibjpegDecoderTest, DecodingRainbowTestCardShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
kDecodedSize);
ASSERT_EQ(status.error_message, "");
ASSERT_EQ(status.code, kTfLiteOk);
std::vector<uint8_t> decoded_vec(decoded, decoded + kDecodedSize);
EXPECT_THAT(decoded_vec, HasRainbowPatternWithTolerance(5));
}
TEST(LibjpegDecoderTest, ErrorsFromJpegLayerAreReturnedToCaller) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string str = "this is not a jpeg image";
tflite::StringRef encoded = {str.c_str(), str.length()};
unsigned char decoded_image[12];
status = decoder->DecodeImage(encoded, kExpectedImageDimensions,
decoded_image, 12);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message, "Not a valid JPEG image.");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenDecodeBufferIsSmall) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
const int decoded_size = 100;
unsigned char decoded[decoded_size];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
decoded_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Size of buffer(100) for storing decoded image must be equal to "
"the size of decoded image(225000).");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageDimensionsDifferFromExpected) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref,
{.height = 300, .width = 250, .channels = 1},
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(
status.error_message,
"Decoded image size (300, 250, 3, 8) is different from provided image "
"size (300, 250, 1, 8)");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageDimensionsAreOverThreshold) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef origin_string_ref = {encoded.c_str(), encoded.length()};
const JpegHeader kHeader{
.height = static_cast<int>(LibjpegDecoder::kMaxImageHeight + 1),
.width = static_cast<int>(LibjpegDecoder::kMaxImageWidth + 1),
.channels = 3};
const size_t decoded_size = static_cast<size_t>(kHeader.height) *
static_cast<size_t>(kHeader.width) *
static_cast<size_t>(kHeader.channels);
std::string altered_image;
Status alter_header_status =
BuildImageWithNewHeader(origin_string_ref, kHeader, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref = {altered_image.c_str(),
altered_image.length()};
std::vector<unsigned char> decoded(decoded_size);
status = decoder->DecodeImage(altered_string_ref, kHeader, decoded.data(),
decoded_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Image is too big, dimensions (" + std::to_string(kHeader.width) +
"," + std::to_string(kHeader.width) +
") larger than the maximum allowed (" +
std::to_string(LibjpegDecoder::kMaxImageHeight) + ", " +
std::to_string(LibjpegDecoder::kMaxImageWidth) + ")");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageHasUnsupportedNumberOfChannels) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[300 * 250 * 4];
const JpegHeader kHeader{.height = 300, .width = 250, .channels = 4};
status = decoder->DecodeImage(string_ref, kHeader, decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Supporting only images with 1 or 3 channels");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenExpectedBitPerSampleIsNot8) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(
string_ref,
{.height = 300, .width = 250, .channels = 3, .bits_per_sample = 4},
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Supporting only images with 8 bits per sample");
}
TEST(LibjpegDecoderTest, DoesNotDecodeBeyondWhatIsSpecifiedInHeader) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string origin_encoded_img(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef origin_string_ref = {origin_encoded_img.c_str(),
origin_encoded_img.length()};
JpegHeader undersized_image_header = {
.height = kExpectedImageDimensions.height / 2,
.width = kExpectedImageDimensions.width / 2,
.channels = kExpectedImageDimensions.channels};
std::string altered_image;
Status alter_header_status = BuildImageWithNewHeader(
origin_string_ref, undersized_image_header, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref{altered_image.c_str(),
altered_image.length()};
unsigned char decoded[kDecodedSize / 4];
status = decoder->DecodeImage(altered_string_ref, undersized_image_header,
decoded, kDecodedSize / 4);
EXPECT_EQ(status.code, kTfLiteOk);
}
TEST(LibjpegDecoderTest, CanReadImagesWithVeryLargeRows) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string origin_encoded_img(
reinterpret_cast<const char*>(g_tflite_acceleration_snow_jpeg),
g_tflite_acceleration_snow_jpeg_len);
tflite::StringRef origin_string_ref = {origin_encoded_img.c_str(),
origin_encoded_img.length()};
JpegHeader one_long_row_image_header = {
.height = 1,
.width = static_cast<int>(LibjpegDecoder::kMaxImageWidth),
.channels = kExpectedImageDimensions.channels};
std::string altered_image;
Status alter_header_status = BuildImageWithNewHeader(
origin_string_ref, one_long_row_image_header, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref = {altered_image.c_str(),
altered_image.length()};
const size_t kImageSize = LibjpegDecoder::kMaxImageWidth * 3;
std::vector<unsigned char> decoded(kImageSize);
status = decoder->DecodeImage(altered_string_ref, one_long_row_image_header,
decoded.data(), kImageSize);
EXPECT_EQ(status.code, kTfLiteOk);
}
TEST(LibjpegDecoderTest, FailDecodingAnImageWithUnexpectedEofInDataStream) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string img(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef truncated_image_ref = {img.c_str(), img.length() - 100};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(truncated_image_ref, kExpectedImageDimensions,
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message, "Not a valid JPEG image.");
}
TEST(LibjpegDecoderTest, JpegErrorMsgParsingForValidMsg) {
size_t extracted_size;
Status status = ExtractSizeFromErrorMessage(
"JPEG parameter struct mismatch: library thinks size is 480, caller "
"expects 464.",
extracted_size);
ASSERT_EQ(status.code, kTfLiteOk);
EXPECT_EQ(extracted_size, 480);
}
TEST(LibjpegDecoderTest, JpegErrorMsgParsingForMalformedMsg) {
size_t extracted_size;
std::string err_msg =
"JPEG parameter struct mismatch: library thinks size is abcde, caller "
"expects 464.";
Status status = ExtractSizeFromErrorMessage(err_msg, extracted_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Couldn't parse the size from message: '" + err_msg + "'");
}
}
}
}
} |
732 | cpp | tensorflow/tensorflow | libc_handle | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBC_HANDLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBC_HANDLE_H_
#ifdef __ANDROID__
#include <dlfcn.h>
#endif
#include <cstdio>
#include <memory>
#include <utility>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
class LibCHandle {
public:
static LibCHandle Create(Status& status);
LibCHandle(LibCHandle const&) = delete;
LibCHandle& operator=(const LibCHandle&) = delete;
LibCHandle(LibCHandle&& other)
: libc_(std::exchange(other.libc_, nullptr)),
fmemopen_(std::exchange(other.fmemopen_, nullptr)) {}
LibCHandle& operator=(LibCHandle&& other) {
if (&other != this) {
CloseHandle();
libc_ = std::exchange(other.libc_, nullptr);
fmemopen_ = std::exchange(other.fmemopen_, nullptr);
}
return *this;
}
~LibCHandle() { CloseHandle(); }
FILE* fmemopen(void* buf, size_t size, const char* mode) const;
private:
using FmemopenPtr = FILE* (*)(void*, size_t, const char*);
LibCHandle(void* libc, FmemopenPtr ptr) : libc_(libc), fmemopen_(ptr) {}
void CloseHandle() {
#ifdef __ANDROID__
if (libc_ != nullptr) {
dlclose(libc_);
}
#endif
}
void* libc_ = nullptr;
FmemopenPtr fmemopen_ = nullptr;
};
}
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#endif
#include <stdio.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
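// Create() resolves fmemopen at runtime. On Android it dlopen()s libc.so and
// looks the symbol up with dlsym(); on other POSIX platforms it falls back to
// the statically linked ::fmemopen; Windows is reported as unsupported.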
LibCHandle LibCHandle::Create(Status &status) {
#ifndef __ANDROID__
#ifndef _WIN32
return LibCHandle(nullptr, ::fmemopen);
#else
status = {kTfLiteError, "Windows not supported."};
return LibCHandle(nullptr, nullptr);
#endif
#else
void *libc = nullptr;
FmemopenPtr fmemopen_ptr = nullptr;
if (!(libc = dlopen("libc.so", RTLD_NOW | RTLD_LOCAL))) {
status = {kTfLiteError,
"Failed to load the libc dynamic shared object library."};
return LibCHandle(nullptr, nullptr);
}
if (!(fmemopen_ptr =
reinterpret_cast<FmemopenPtr>(dlsym(libc, "fmemopen")))) {
status = {kTfLiteError, "Failed to dynamically load the method: fmemopen"};
return LibCHandle(nullptr, nullptr);
}
status = {kTfLiteOk, ""};
return LibCHandle(libc, fmemopen_ptr);
#endif
}
FILE *LibCHandle::fmemopen(void *buf, size_t size, const char *mode) const {
return fmemopen_(buf, size, mode);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
TEST(LibCHandleTest, LoadingSucceedsAndroidPlatforms) {
Status status;
LibCHandle handle = LibCHandle::Create(status);
EXPECT_EQ(status.error_message, "");
EXPECT_EQ(status.code, kTfLiteOk);
}
}
}
}
} |
733 | cpp | tensorflow/tensorflow | fb_storage | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_FB_STORAGE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_FB_STORAGE_H_
#include <errno.h>
#include <cstring>
#include <string>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "flatbuffers/base.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
namespace acceleration {
class FileStorage {
public:
FileStorage(absl::string_view path, ErrorReporter* error_reporter);
MinibenchmarkStatus ReadFileIntoBuffer();
MinibenchmarkStatus AppendDataToFile(absl::string_view data);
protected:
std::string path_;
ErrorReporter* error_reporter_;
std::string buffer_;
};
ABSL_CONST_INIT extern const char kFlatbufferStorageIdentifier[];
template <typename T>
class FlatbufferStorage : protected FileStorage {
public:
explicit FlatbufferStorage(
absl::string_view path,
ErrorReporter* error_reporter = DefaultErrorReporter())
: FileStorage(path, error_reporter) {}
MinibenchmarkStatus Read();
size_t Count() { return contents_.size(); }
const T* Get(size_t i) { return contents_[i]; }
MinibenchmarkStatus Append(flatbuffers::FlatBufferBuilder* fbb,
flatbuffers::Offset<T> object);
private:
std::vector<const T*> contents_;
};
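// Read() interprets the storage file as a sequence of size-prefixed
// flatbuffers, verifying each entry against kFlatbufferStorageIdentifier
// before exposing it through Get(); any truncated or unverifiable entry makes
// the whole read fail with kMinibenchmarkCorruptSizePrefixedFlatbufferFile.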
template <typename T>
MinibenchmarkStatus FlatbufferStorage<T>::Read() {
contents_.clear();
MinibenchmarkStatus status = ReadFileIntoBuffer();
if (status != kMinibenchmarkSuccess) {
return status;
}
size_t remaining_size = buffer_.size();
const uint8_t* current_ptr =
reinterpret_cast<const uint8_t*>(buffer_.c_str());
while (remaining_size != 0) {
if (remaining_size < sizeof(flatbuffers::uoffset_t)) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Corrupt size-prefixed flatbuffer file %s (remaining size less than "
"size of uoffset_t)",
path_.c_str());
return kMinibenchmarkCorruptSizePrefixedFlatbufferFile;
}
flatbuffers::uoffset_t current_size =
flatbuffers::ReadScalar<flatbuffers::uoffset_t>(current_ptr);
flatbuffers::Verifier verifier(
current_ptr, sizeof(flatbuffers::uoffset_t) + current_size);
if (!verifier.VerifySizePrefixedBuffer<T>(kFlatbufferStorageIdentifier)) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Corrupt size-prefixed flatbuffer file %s (verifier returned false)",
path_.c_str());
return kMinibenchmarkCorruptSizePrefixedFlatbufferFile;
}
contents_.push_back(flatbuffers::GetSizePrefixedRoot<T>(current_ptr));
size_t consumed = sizeof(flatbuffers::uoffset_t) + current_size;
if (remaining_size < consumed) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Corrupt size-prefixed flatbuffer file %s (mismatched size "
"calculation)",
path_.c_str());
return kMinibenchmarkCorruptSizePrefixedFlatbufferFile;
}
remaining_size -= consumed;
current_ptr += consumed;
}
return kMinibenchmarkSuccess;
}
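// Append() finishes `object` with the storage identifier, appends the
// size-prefixed bytes to the file and then re-reads the whole file so that
// Count()/Get() reflect the new contents.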
template <typename T>
MinibenchmarkStatus FlatbufferStorage<T>::Append(
flatbuffers::FlatBufferBuilder* fbb, flatbuffers::Offset<T> object) {
contents_.clear();
fbb->FinishSizePrefixed(object, kFlatbufferStorageIdentifier);
const char* data = reinterpret_cast<const char*>(fbb->GetBufferPointer());
size_t size = fbb->GetSize();
MinibenchmarkStatus status = AppendDataToFile({data, size});
if (status != kMinibenchmarkSuccess) {
return status;
}
return Read();
}
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <fcntl.h>
#include <string.h>
#ifndef _WIN32
#include <sys/file.h>
#include <unistd.h>
#endif
#include <fstream>
#include <sstream>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
FileStorage::FileStorage(absl::string_view path, ErrorReporter* error_reporter)
: path_(path), error_reporter_(error_reporter) {}
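// Reads the whole storage file into buffer_ under an exclusive flock. If the
// file does not exist yet it is created empty and the call still succeeds, so
// first use of a storage path is not an error.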
MinibenchmarkStatus FileStorage::ReadFileIntoBuffer() {
#ifndef _WIN32
buffer_.clear();
int fd = TEMP_FAILURE_RETRY(open(path_.c_str(), O_RDONLY | O_CLOEXEC, 0600));
int open_error_no = errno;
if (fd < 0) {
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd >= 0) {
close(fd);
return kMinibenchmarkSuccess;
}
int create_error_no = errno;
TF_LITE_REPORT_ERROR(
error_reporter_,
"Could not open %s for reading: %s, creating failed as well: %s",
path_.c_str(), std::strerror(open_error_no),
std::strerror(create_error_no));
return kMinibenchmarkCantCreateStorageFile;
}
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
char buffer[512];
while (true) {
int bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, 512));
int read_error_no = errno;
if (bytes_read == 0) {
close(fd);
return kMinibenchmarkSuccess;
} else if (bytes_read < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Error reading %s: %s",
path_.c_str(), std::strerror(read_error_no));
return kMinibenchmarkErrorReadingStorageFile;
} else {
buffer_.append(buffer, bytes_read);
}
}
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
MinibenchmarkStatus FileStorage::AppendDataToFile(absl::string_view data) {
#ifndef _WIN32
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Could not open %s for writing: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkFailedToOpenStorageFileForWriting;
}
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
absl::string_view bytes = data;
while (!bytes.empty()) {
ssize_t bytes_written =
TEMP_FAILURE_RETRY(write(fd, bytes.data(), bytes.size()));
if (bytes_written < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not write to %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorWritingStorageFile;
}
bytes.remove_prefix(bytes_written);
}
if (TEMP_FAILURE_RETRY(fsync(fd)) < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to fsync %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorFsyncingStorageFile;
}
if (TEMP_FAILURE_RETRY(close(fd)) < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to close %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorClosingStorageFile;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
const char kFlatbufferStorageIdentifier[] = "STO1";
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <algorithm>
#include <string>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace acceleration {
namespace {
std::string GetTemporaryDirectory() {
#ifdef __ANDROID__
return "/data/local/tmp";
#else
if (getenv("TEST_TMPDIR")) {
return getenv("TEST_TMPDIR");
}
if (getenv("TEMP")) {
return getenv("TEMP");
}
return ".";
#endif
}
std::string GetStoragePath() {
std::string path = GetTemporaryDirectory() + "/storage.fb";
unlink(path.c_str());
return path;
}
TEST(FlatbufferStorageTest, AppendAndReadOneItem) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
}
TEST(FlatbufferStorageTest, AppendAndReadThreeItems) {
std::string path = GetStoragePath();
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
for (auto event : {BenchmarkEventType_START, BenchmarkEventType_ERROR,
BenchmarkEventType_END}) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> object =
CreateBenchmarkEvent(fbb, 0, event);
EXPECT_EQ(storage.Append(&fbb, object), kMinibenchmarkSuccess);
}
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
}
TEST(FlatbufferStorageTest, PathDoesntExist) {
std::string path = GetTemporaryDirectory() + "/nosuchdirectory/storage.pb";
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkCantCreateStorageFile);
}
#ifndef __ANDROID__
TEST(FlatbufferStorageTest, WriteFailureResetsStorage) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
chmod(path.c_str(), 0444);
EXPECT_EQ(storage.Append(&fbb, o),
kMinibenchmarkFailedToOpenStorageFileForWriting);
ASSERT_EQ(storage.Count(), 0);
}
#endif
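// Several threads append concurrently to the same storage file; the
// flock-based locking in FileStorage must serialize the writes so that every
// one of the kNumThreads * kIterations events survives intact.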
TEST(FlatbufferStorageTest, Locking) {
std::string path = GetStoragePath();
std::vector<std::thread> threads;
const int kNumThreads = 4;
const int kIterations = 10;
threads.reserve(kNumThreads);
for (int i = 0; i < kNumThreads; i++) {
threads.push_back(std::thread([path]() {
for (int j = 0; j < kIterations; j++) {
FlatbufferStorage<BenchmarkEvent> storage(path);
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
}
}));
}
std::for_each(threads.begin(), threads.end(),
[](std::thread& t) { t.join(); });
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), kNumThreads * kIterations);
}
}
}
} |
734 | cpp | tensorflow/tensorflow | blocking_validator_runner | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_BLOCKING_VALIDATOR_RUNNER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_BLOCKING_VALIDATOR_RUNNER_H_
#include <memory>
#include <string>
#include <vector>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
namespace tflite {
namespace acceleration {
class BlockingValidatorRunner {
public:
explicit BlockingValidatorRunner(const ValidatorRunnerOptions& options);
MinibenchmarkStatus Init();
std::vector<flatbuffers::FlatBufferBuilder> TriggerValidation(
const std::vector<const TFLiteSettings*>& for_settings);
private:
int per_test_timeout_ms_ = 0;
const std::string storage_path_base_;
std::unique_ptr<ValidatorRunnerImpl> validator_runner_impl_;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
std::string GenerateRandomString() {
static const char charset[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const int size = 10;
std::string result;
result.resize(size);
for (int i = 0; i < size; ++i) {
result[i] = charset[rand() % (sizeof(charset) - 1)];
}
return result;
}
}
BlockingValidatorRunner::BlockingValidatorRunner(
const ValidatorRunnerOptions& options)
: per_test_timeout_ms_(options.per_test_timeout_ms),
storage_path_base_(options.storage_path) {
validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>(
CreateModelLoaderPath(options), options.storage_path,
options.data_directory_path, options.per_test_timeout_ms,
options.custom_input_data.empty()
? nullptr
: std::make_unique<CustomValidationEmbedder>(
options.custom_input_batch_size, options.custom_input_data,
options.error_reporter),
options.error_reporter, options.nnapi_sl, options.gpu_plugin_handle,
options.validation_entrypoint_name, options.benchmark_result_evaluator);
}
MinibenchmarkStatus BlockingValidatorRunner::Init() {
return validator_runner_impl_->Init();
}
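// Runs validation synchronously: the settings are copied into flatbuffers,
// handed to the async ValidatorRunnerImpl with a uniquely named storage file,
// and polled until every test reports back or the aggregate timeout expires.
// Settings that never produce a completion event get a synthesized ERROR event
// with kMinibenchmarkCompletionEventMissing.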
std::vector<FlatBufferBuilder> BlockingValidatorRunner::TriggerValidation(
const std::vector<const TFLiteSettings*>& for_settings) {
if (for_settings.empty()) {
return {};
}
std::string storage_path =
absl::StrCat(storage_path_base_, ".", GenerateRandomString());
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Validation storage path: %s",
storage_path.c_str());
std::vector<flatbuffers::FlatBufferBuilder> to_be_run;
std::vector<TFLiteSettingsT> for_settings_obj;
for_settings_obj.reserve(for_settings.size());
for (auto settings : for_settings) {
TFLiteSettingsT tflite_settings;
settings->UnPackTo(&tflite_settings);
flatbuffers::FlatBufferBuilder copy;
copy.Finish(CreateTFLiteSettings(copy, &tflite_settings));
to_be_run.emplace_back(std::move(copy));
for_settings_obj.emplace_back(tflite_settings);
}
validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run),
storage_path);
int64_t total_timeout_ms = per_test_timeout_ms_ * (1 + for_settings.size());
int64_t deadline_us = Validator::BootTimeMicros() + total_timeout_ms * 1000;
bool within_timeout = true;
while ((validator_runner_impl_->GetNumCompletedResults()) <
for_settings.size() &&
(within_timeout = Validator::BootTimeMicros() < deadline_us)) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results =
validator_runner_impl_->GetCompletedResults();
if (!within_timeout) {
TFLITE_LOG_PROD(
TFLITE_LOG_WARNING,
"Validation timed out after %ld ms. Return before all tests finished.",
total_timeout_ms);
} else if (for_settings.size() != results.size()) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Validation completed.Started benchmarking for %d "
"TFLiteSettings, received %d results.",
for_settings.size(), results.size());
}
std::vector<TFLiteSettingsT> result_settings;
result_settings.reserve(results.size());
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
TFLiteSettingsT event_settings;
event->tflite_settings()->UnPackTo(&event_settings);
result_settings.emplace_back(std::move(event_settings));
}
for (auto& settings_obj : for_settings_obj) {
auto result_it =
std::find(result_settings.begin(), result_settings.end(), settings_obj);
if (result_it == result_settings.end()) {
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN,
0, 0,
0,
kMinibenchmarkCompletionEventMissing),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
results.emplace_back(std::move(fbb));
}
}
(void)unlink(storage_path.c_str());
return results;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <fcntl.h>
#include <iostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
class CustomResultEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class BlockingValidatorRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
options_.data_directory_path = ::testing::TempDir();
options_.storage_path =
absl::StrCat(::testing::TempDir(), "storage_path.fb.1");
options_.per_test_timeout_ms = 5000;
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
}
std::string plain_model_path_;
ValidatorRunnerOptions options_;
bool should_perform_test_ = true;
};
TEST_F(BlockingValidatorRunnerTest, SucceedWithEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdCloexecEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_fd = open(options_.model_path.c_str(), O_RDONLY | O_CLOEXEC);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithBufferModel) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer =
g_tflite_acceleration_embedded_mobilenet_validation_model;
options_.model_size =
g_tflite_acceleration_embedded_mobilenet_validation_model_len;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdModelCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_path.clear();
options_.model_fd = open(plain_model_path_.c_str(), O_RDONLY);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.custom_input_batch_size = 3;
options_.custom_input_data = {std::vector<uint8_t>(3 * 224 * 224 * 3, 1)};
CustomResultEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_XNNPACK));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWhenRunningMultipleTimes) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
int num_runs = 3;
for (int i = 0; i < num_runs; i++) {
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer()),
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
}
TEST_F(BlockingValidatorRunnerTest, ReturnErrorWhenTimedOut) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.per_test_timeout_ms = 50;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::SizeIs(1));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR);
ASSERT_NE(nullptr, event->error());
EXPECT_THAT(event->error()->mini_benchmark_error_code(),
testing::AnyOf(kMinibenchmarkCommandTimedOut,
kMinibenchmarkCompletionEventMissing));
}
}
}
}
} |
735 | cpp | tensorflow/tensorflow | big_little_affinity | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_BIG_LITTLE_AFFINITY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_BIG_LITTLE_AFFINITY_H_
#include <cstdint>
namespace tflite {
namespace acceleration {
struct BigLittleAffinity {
uint16_t big_core_affinity = 0;
uint16_t little_core_affinity = 0;
};
BigLittleAffinity GetAffinity();
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include "include/cpuinfo.h"
namespace tflite {
namespace acceleration {
namespace {
bool IsInOrderArch(cpuinfo_uarch arch) {
switch (arch) {
case cpuinfo_uarch_cortex_a53:
case cpuinfo_uarch_cortex_a55r0:
case cpuinfo_uarch_cortex_a55:
case cpuinfo_uarch_cortex_a57:
return true;
default:
return false;
}
return false;
}
}
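// Classifies cores by the maximum frequency of their cluster. If every core
// reports the same maximum frequency the split falls back to
// micro-architecture (in-order cores are treated as "little"); if exactly two
// cores sit at the top frequency only those two are "big"; otherwise only the
// slowest cluster is "little". The bitmasks are populated from each
// processor's linux_id on Android only, and collapse to a single shared mask
// on effectively homogeneous devices.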
BigLittleAffinity GetAffinity() {
BigLittleAffinity affinity;
if (!cpuinfo_initialize()) {
return affinity;
}
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
uint64_t smallest_max_frequency = UINT64_MAX;
uint64_t largest_max_frequency = 0;
uint64_t processors_count = cpuinfo_get_processors_count();
for (auto i = 0; i < processors_count; i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
smallest_max_frequency =
std::min(smallest_max_frequency, processor->core->frequency);
largest_max_frequency =
std::max(largest_max_frequency, processor->core->frequency);
}
}
int count_of_processors_with_largest_max_frequency = 0;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
if (max_frequency == largest_max_frequency) {
++count_of_processors_with_largest_max_frequency;
}
}
std::set<cpuinfo_uarch> archs;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
bool is_little;
archs.insert(processor->core->uarch);
if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count()) {
is_little = IsInOrderArch(processor->core->uarch);
} else if (count_of_processors_with_largest_max_frequency == 2) {
is_little = (max_frequency != largest_max_frequency);
} else {
is_little = (max_frequency == smallest_max_frequency);
}
#ifdef __ANDROID__
if (is_little) {
affinity.little_core_affinity |= (0x1 << processor->linux_id);
} else {
affinity.big_core_affinity |= (0x1 << processor->linux_id);
}
#endif
}
if (cluster_to_max_frequency.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
} else if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count() &&
archs.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
}
return affinity;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <cstdint>
#include <map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "include/cpuinfo.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BigLittle, CheckBasics) {
ASSERT_TRUE(cpuinfo_initialize());
auto processors_count = cpuinfo_get_processors_count();
ASSERT_GT(processors_count, 0);
#if defined(__ANDROID__)
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
if (android_info.is_emulator) {
std::cout << "Running on emulator\n";
return;
} else {
std::cout << "Running on hardware\n";
}
ASSERT_TRUE(status.ok());
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
}
}
EXPECT_GT(cluster_to_max_frequency.size(), 0);
EXPECT_LE(cluster_to_max_frequency.size(), 3);
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
EXPECT_TRUE(cluster_to_max_frequency.find(processor->cluster->cluster_id) !=
cluster_to_max_frequency.end());
}
BigLittleAffinity affinity = GetAffinity();
EXPECT_GT(affinity.little_core_affinity, 0);
EXPECT_GT(affinity.big_core_affinity, 0);
std::cout << "Little core affinity: " << std::hex
<< affinity.little_core_affinity << std::endl;
std::cout << "Big core affinity: " << std::hex << affinity.big_core_affinity
<< std::endl;
#endif
}
}
}
} |
736 | cpp | tensorflow/tensorflow | runner | tensorflow/lite/experimental/acceleration/mini_benchmark/runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/runner_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_RUNNER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_RUNNER_H_
#include <string>
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
namespace acceleration {
class ProcessRunner {
public:
ProcessRunner(const std::string& temporary_path,
const std::string& function_name,
int (*function_pointer)(int argc, char** argv),
int timeout_millisec = 0,
ErrorReporter* error_reporter = tflite::DefaultErrorReporter())
: temporary_path_(temporary_path),
function_name_(function_name),
function_pointer_(reinterpret_cast<void*>(function_pointer)),
timeout_millisec_(timeout_millisec),
error_reporter_(error_reporter) {}
MinibenchmarkStatus Init();
MinibenchmarkStatus Run(const Allocation* model_allocation,
const std::vector<std::string>& args,
std::string* output, int* exitcode, int* signal);
ProcessRunner(ProcessRunner&) = delete;
ProcessRunner& operator=(const ProcessRunner&) = delete;
private:
#ifdef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
int RunInprocess(const Allocation* model_allocation,
const std::vector<std::string>& args);
#endif
#ifndef _WIN32
bool KillProcessWhenTimedOut(FILE* fstream);
#endif
std::string temporary_path_;
std::string function_name_;
void* function_pointer_;
std::string runner_path_;
std::string soname_;
int timeout_millisec_;
ErrorReporter* error_reporter_;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#ifndef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
#include <dlfcn.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#ifndef _WIN32
#include <poll.h>
#include <signal.h>
#include <unistd.h>
#endif
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#if defined(__ANDROID__) && !defined(TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS)
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_executable.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
std::string ShellEscape(const std::string& src);
}
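// On Android, Init() locates the shared object that contains the entry point
// via dladdr(), writes the embedded runner binary into temporary_path_, marks
// it executable, and (on API 29+) arranges for it to be launched through the
// system linker. Elsewhere, or when benchmarking in-process, it is a no-op
// beyond checking the function pointer.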
MinibenchmarkStatus ProcessRunner::Init() {
if (!function_pointer_) {
return kMinibenchmarkPreconditionNotMet;
}
#if !defined(__ANDROID__) || defined(TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS)
return kMinibenchmarkSuccess;
#else
tflite::acceleration::AndroidInfo android_info;
if (!tflite::acceleration::RequestAndroidInfo(&android_info).ok()) {
return kMinibenchmarkRequestAndroidInfoFailed;
}
if (android_info.android_sdk_version.length() < 2 ||
android_info.android_sdk_version < "23") {
return kMinibenchmarkUnsupportedPlatform;
}
std::string soname;
Dl_info dl_info;
int status = dladdr(function_pointer_, &dl_info);
if (status != 0) {
if (dl_info.dli_fname) {
soname = dl_info.dli_fname;
} else {
return kMinibenchmarkDliFnameWasNull;
}
} else {
return kMinibenchmarkDladdrReturnedZero;
}
if (soname.size() >= 4 && soname.substr(soname.size() - 4) == ".apk") {
return kMinibenchmarkDliFnameHasApkNameOnly;
}
std::string runner_path;
runner_path = temporary_path_ + "/runner";
(void)unlink(runner_path.c_str());
std::string runner_contents(
reinterpret_cast<const char*>(g_tflite_acceleration_embedded_runner),
g_tflite_acceleration_embedded_runner_len);
std::ofstream f(runner_path, std::ios::binary);
if (!f.is_open()) {
return kMinibenchmarkCouldntOpenTemporaryFileForBinary;
}
f << runner_contents;
f.close();
if (chmod(runner_path.c_str(), 0500) != 0) {
return kMinibenchmarkCouldntChmodTemporaryFile;
}
runner_path = ShellEscape(runner_path);
if (android_info.android_sdk_version >= "29") {
#if defined(__arm__) || defined(__i386__)
std::string linker_path = "/system/bin/linker";
#else
std::string linker_path = "/system/bin/linker64";
#endif
runner_path = linker_path + " " + runner_path;
}
runner_path_ = runner_path;
soname_ = soname;
return kMinibenchmarkSuccess;
#endif
}
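// Reads the child's pid (written as a fixed-width field at the start of its
// output), then polls the pipe for hang-up for up to timeout_millisec_; if the
// child is still alive when the timer expires it is killed with SIGKILL and
// the function returns true.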
#ifndef _WIN32
bool ProcessRunner::KillProcessWhenTimedOut(FILE* fstream) {
const int array_length = 1 + kPidBufferLength;
char buffer[array_length];
memset(buffer, '\0', array_length);
ssize_t length = fread(buffer, 1, kPidBufferLength, fstream);
int pid;
if (length != kPidBufferLength || !absl::SimpleAtoi(buffer, &pid)) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to get Validator subprocess id: %s", buffer);
return false;
}
struct pollfd pfd[1];
pfd[0].fd = fileno(fstream);
pfd[0].events = POLLHUP;
int poll_ret = poll(pfd, 1, timeout_millisec_);
if (poll_ret == 0) {
kill(pid, SIGKILL);
return true;
} else if (poll_ret < 0) {
TF_LITE_REPORT_ERROR(error_reporter_, "Validator timer failed: %s",
strerror(errno));
}
return false;
}
#endif
MinibenchmarkStatus ProcessRunner::Run(const Allocation* model_allocation,
const std::vector<std::string>& args,
std::string* output, int* exitcode,
int* signal) {
#ifdef _WIN32
return kMinibenchmarkUnsupportedPlatform;
#else
if (!output || !exitcode) {
return kMinibenchmarkPreconditionNotMet;
}
int benchmark_process_status = 0;
MinibenchmarkStatus status = kMinibenchmarkCommandFailed;
#ifdef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
if (function_pointer_) {
benchmark_process_status = RunInprocess(model_allocation, args);
} else {
return kMinibenchmarkPreconditionNotMet;
}
#else
if (runner_path_.empty()) {
return kMinibenchmarkPreconditionNotMet;
}
std::string cmd = runner_path_ + " " + ShellEscape(soname_) + " " +
ShellEscape(function_name_);
int pipe_fds[2];
if (model_allocation != nullptr) {
if (pipe(pipe_fds) < 0) {
*exitcode = errno;
return kMinibenchmarkPipeFailed;
}
std::string pipe_model_path = absl::StrCat(
"pipe:", pipe_fds[0], ":", pipe_fds[1], ":", model_allocation->bytes());
cmd = cmd + " " + ShellEscape(pipe_model_path);
}
for (const auto& arg : args) {
cmd = cmd + " " + ShellEscape(arg);
}
FILE* f = popen(cmd.c_str(), "r");
if (!f) {
*exitcode = errno;
return kMinibenchmarkPopenFailed;
}
if (model_allocation != nullptr) {
close(pipe_fds[0]);
int written_bytes = 0;
int remaining_bytes = model_allocation->bytes();
const uint8_t* current =
static_cast<const uint8_t*>(model_allocation->base());
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], current, remaining_bytes)) > 0) {
remaining_bytes -= written_bytes;
current += written_bytes;
}
close(pipe_fds[1]);
if (written_bytes <= 0 || remaining_bytes > 0) {
*exitcode = errno;
return kMinibenchmarkPipeFailed;
}
}
if (timeout_millisec_ > 0 && KillProcessWhenTimedOut(f)) {
status = kMinibenchmarkCommandTimedOut;
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Validator did not finish after %dms. Tried to kill the test.",
timeout_millisec_);
}
std::vector<char> buffer(4 * 1024, 0);
ssize_t length;
std::string ret;
do {
length = fread(buffer.data(), 1, buffer.size(), f);
ret = ret + std::string(buffer.data(), length);
} while (length == buffer.size());
*output = ret;
benchmark_process_status = pclose(f);
#endif
if (WIFEXITED(benchmark_process_status)) {
*exitcode = WEXITSTATUS(benchmark_process_status);
*signal = 0;
if (*exitcode == kMinibenchmarkSuccess) {
status = kMinibenchmarkSuccess;
}
} else if (WIFSIGNALED(benchmark_process_status)) {
*exitcode = 0;
*signal = WTERMSIG(benchmark_process_status);
}
return status;
#endif
}
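// In-process mode skips fork/exec entirely: it fakes an argv for the entry
// point, optionally streams the model through a pipe from a background writer
// thread, calls the entry point directly and wraps its return value in a
// wait()-style exit code.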
#ifdef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
#ifndef __W_EXITCODE
#define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif
int ProcessRunner::RunInprocess(const Allocation* model_allocation,
const std::vector<std::string>& user_args) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running Validator in-process.");
std::vector<std::string> args_string;
args_string.push_back("inprocess");
args_string.push_back("inprocess");
args_string.push_back(function_name_);
std::thread write_thread;
if (model_allocation != nullptr) {
int pipe_fds[2];
if (pipe(pipe_fds) < 0) {
return __W_EXITCODE(kMinibenchmarkPipeFailed, 0);
}
args_string.push_back(
absl::StrCat("pipe:", pipe_fds[0], ":-1:", model_allocation->bytes()));
write_thread = std::thread([pipe_fds, model_allocation,
error_reporter = error_reporter_]() {
int written_bytes = 0;
int remaining_bytes = model_allocation->bytes();
const uint8_t* current =
static_cast<const uint8_t*>(model_allocation->base());
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], current, remaining_bytes)) >
0) {
remaining_bytes -= written_bytes;
current += written_bytes;
}
close(pipe_fds[1]);
if (written_bytes < 0 || remaining_bytes > 0) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Failed to write Model to pipe: %s. Expect to write %d "
"bytes, %d bytes written.",
strerror(errno), remaining_bytes, written_bytes);
}
});
}
for (int i = 0; i < user_args.size(); i++) {
args_string.push_back(user_args[i]);
}
std::vector<std::vector<char>> args_char(args_string.size());
std::vector<char*> argv(args_string.size());
for (int i = 0; i < args_string.size(); i++) {
args_char[i] = {args_string[i].begin(), args_string[i].end()};
args_char[i].push_back('\0');
argv[i] = args_char[i].data();
}
int (*function_pointer)(int, char**) =
reinterpret_cast<int (*)(int, char**)>(function_pointer_);
int exit_code = __W_EXITCODE(function_pointer(argv.size(), argv.data()), 0);
if (write_thread.joinable()) {
write_thread.join();
}
return exit_code;
}
#endif
namespace {
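// Characters that never need quoting on a shell command line; ShellEscape() below wraps
// anything else in single or double quotes with escaping.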
static const char kDontNeedShellEscapeChars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+-_.=/:,@";
std::string ShellEscape(const std::string& src) {
if (!src.empty() &&
src.find_first_not_of(kDontNeedShellEscapeChars) == std::string::npos) {
return src;
} else if (src.find('\'') == std::string::npos) {
return "'" + src + "'";
} else {
std::string result = "\"";
for (const char c : src) {
switch (c) {
case '\\':
case '$':
case '"':
case '`':
result.push_back('\\');
}
result.push_back(c);
}
result.push_back('"');
return result;
}
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include <dlfcn.h>
#include <signal.h>
#include <cstddef>
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_executable.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_unit_test_entry_points.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
extern "C" {
int TfLiteFunctionInBinary(int argc, char** argv) { return 2; }
}
namespace tflite {
namespace acceleration {
namespace {
typedef int (*EntryPoint)(int, char**);
using flatbuffers::FlatBufferBuilder;
struct RunnerTest : ::testing::Test {
protected:
void* LoadEntryPointModule() {
void* module =
dlopen(entry_point_file.c_str(), RTLD_NOW | RTLD_LOCAL | RTLD_NODELETE);
EXPECT_TRUE(module) << dlerror();
return module;
}
EntryPoint Load(const char* name) {
#ifdef __ANDROID__
void* module = LoadEntryPointModule();
if (!module) {
return nullptr;
}
#else
auto module = RTLD_DEFAULT;
#endif
void* symbol = dlsym(module, name);
return reinterpret_cast<EntryPoint>(symbol);
}
void SetUp() override {
#ifdef __ANDROID__
entry_point_file = MiniBenchmarkTestHelper::DumpToTempFile(
"librunner_unit_test_entry_points.so",
g_tflite_acceleration_embedded_runner_unit_test_entry_points,
g_tflite_acceleration_embedded_runner_unit_test_entry_points_len);
ASSERT_TRUE(!entry_point_file.empty());
#endif
}
void Init(const char* symbol_name) {
EntryPoint function = Load(symbol_name);
ASSERT_TRUE(function);
runner = std::make_unique<ProcessRunner>(::testing::TempDir(), symbol_name,
function, timeout_ms);
ASSERT_EQ(runner->Init(), kMinibenchmarkSuccess);
}
FlatBufferBuilder CreateTestModel() {
ModelT model;
model.description = "test";
flatbuffers::FlatBufferBuilder fbb;
FinishModelBuffer(fbb, CreateModel(fbb, &model));
return fbb;
}
int exitcode = 0;
int signal = 0;
int timeout_ms = 0;
std::string output;
std::unique_ptr<ProcessRunner> runner;
std::string entry_point_file;
};
#if !defined(__aarch64__)
TEST_F(RunnerTest, LoadSymbol) {
EntryPoint TfLiteJustReturnZero = Load("TfLiteJustReturnZero");
ASSERT_TRUE(TfLiteJustReturnZero);
#ifdef __ANDROID__
Dl_info dl_info;
int status = dladdr(reinterpret_cast<void*>(TfLiteJustReturnZero), &dl_info);
ASSERT_TRUE(status) << dlerror();
ASSERT_TRUE(dl_info.dli_fname) << dlerror();
void* this_module =
dlopen(dl_info.dli_fname, RTLD_NOW | RTLD_LOCAL | RTLD_NODELETE);
ASSERT_TRUE(this_module);
void* symbol = dlsym(this_module, "TfLiteJustReturnZero");
EXPECT_TRUE(symbol);
#endif
}
TEST_F(RunnerTest, JustReturnZero) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteJustReturnZero"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 0);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, ReturnOne) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReturnOne"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 1);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, ReturnSuccess) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReturnSuccess"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, NullFunctionPointer) {
ProcessRunner runner("foo", "bar", nullptr);
EXPECT_EQ(runner.Init(), kMinibenchmarkPreconditionNotMet);
EXPECT_EQ(runner.Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkPreconditionNotMet);
}
#ifdef __ANDROID__
TEST_F(RunnerTest, SigKillSubprocess) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteSigKillSelf"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 0);
EXPECT_EQ(signal, SIGKILL);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, WriteOk) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWriteOk"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "ok\n");
}
TEST_F(RunnerTest, Write10kChars) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWrite10kChars"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output.size(), 10000);
}
TEST_F(RunnerTest, ArgsArePassed) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWriteArgs"));
EXPECT_EQ(
runner->Run(nullptr, {"foo", "bar", "baz"}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "foo\nbar\nbaz\n");
}
TEST_F(RunnerTest, SymbolLookupFailed) {
ProcessRunner runner(::testing::TempDir(), "TfLiteFunctionInBinary",
TfLiteFunctionInBinary);
EXPECT_EQ(runner.Init(), kMinibenchmarkSuccess);
EXPECT_EQ(runner.Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed)
<< output;
EXPECT_EQ(exitcode, kMinibenchmarkRunnerMainSymbolLookupFailed) << output;
}
TEST_F(RunnerTest, ReadModelFromPipe) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReadFromPipe"));
FlatBufferBuilder model = CreateTestModel();
MemoryAllocation alloc(model.GetBufferPointer(), model.GetSize(),
tflite::DefaultErrorReporter());
EXPECT_EQ(runner->Run(&alloc, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output,
std::string((char*)model.GetBufferPointer(), model.GetSize()));
}
TEST_F(RunnerTest, ProcessTimedOut) {
timeout_ms = 1000;
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWritePidThenSleepNSec"));
EXPECT_EQ(runner->Run(nullptr, {"30"}, &output, &exitcode, &signal),
kMinibenchmarkCommandTimedOut);
EXPECT_EQ(signal, SIGKILL);
EXPECT_EQ(output, "");
EXPECT_EQ(exitcode, 0);
}
TEST_F(RunnerTest, ProcessDoNotTimedOut) {
timeout_ms = 3000;
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWritePidThenSleepNSec"));
EXPECT_EQ(runner->Run(nullptr, {"1"}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
}
#else
TEST_F(RunnerTest, ReadModelFromPipeNonAndroid) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReadFromPipeInProcess"));
FlatBufferBuilder model = CreateTestModel();
MemoryAllocation alloc(model.GetBufferPointer(), model.GetSize(),
tflite::DefaultErrorReporter());
EXPECT_EQ(runner->Run(&alloc, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
}
#endif
#endif
}
}
} |
737 | cpp | tensorflow/tensorflow | file_lock | tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_FILE_LOCK_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_FILE_LOCK_H_
#ifndef _WIN32
#include <unistd.h>
#endif
#include <string>
namespace tflite {
namespace acceleration {
class FileLock {
public:
explicit FileLock(const std::string& path) : path_(path) {}
FileLock(FileLock&& other) = default;
FileLock& operator=(FileLock&& other) = default;
~FileLock() {
#ifndef _WIN32
if (fd_ >= 0) {
close(fd_);
}
#endif
}
bool TryLock();
private:
std::string path_;
int fd_ = -1;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#ifndef _WIN32
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#include <string>
namespace tflite {
namespace acceleration {
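// Opens (creating if needed) the lock file and tries to take an exclusive, non-blocking flock();
// returns false if another process holds the lock, or on Windows where locking is compiled out.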
bool FileLock::TryLock() {
#ifndef _WIN32
if (fd_ < 0) {
fd_ = open(path_.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
}
if (fd_ < 0) {
return false;
}
if (flock(fd_, LOCK_EX | LOCK_NB) == 0) {
return true;
}
#endif
return false;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <csignal>
#include <iostream>
#include <string>
#include <utility>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace {
class FileLockTest : public ::testing::Test {
protected:
void SetUp() override { file_path_ = ::testing::TempDir() + "/file_lock"; }
std::string file_path_;
};
TEST_F(FileLockTest, CanLock) { EXPECT_TRUE(FileLock(file_path_).TryLock()); }
TEST_F(FileLockTest, FailIfLockMoreThanOnce) {
FileLock lock_one(file_path_);
FileLock lock_two(file_path_);
ASSERT_TRUE(lock_one.TryLock());
EXPECT_FALSE(lock_two.TryLock());
}
TEST_F(FileLockTest, LockReleasedWhenThreadCrash) {
pid_t pid = fork();
if (pid == 0) {
FileLock lock(file_path_);
if (!lock.TryLock()) {
_exit(1);
}
std::cout << "Lock acquired successfully.";
kill(getpid(), SIGKILL);
}
int wstatus;
int w = waitpid(pid, &wstatus, WUNTRACED);
ASSERT_NE(w, -1);
FileLock lock_two(file_path_);
EXPECT_TRUE(lock_two.TryLock());
}
}
}
} |
738 | cpp | tensorflow/tensorflow | validator_runner_entrypoint | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_ENTRYPOINT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_ENTRYPOINT_H_
extern "C" int Java_org_tensorflow_lite_acceleration_validation_entrypoint(
int argc, char** argv);
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.h"
#include <dlfcn.h>
#include <memory>
#include <string>
#ifndef _WIN32
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdint>
#include <thread>
#include <utility>
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/set_big_core_affinity.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using flatbuffers::Offset;
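// Loads the optional delegate support library, merges it into a copy of the TFLiteSettings,
// and runs a single validation pass over the model at model_path.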
Validator::Status RunValidator(const std::string& model_path,
const std::string& delegate_so_path,
const TFLiteSettingsT& tflite_settings,
Validator::Results& results) {
TFLiteSettingsT copy(tflite_settings);
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> nnapi_sl_handle;
if (!delegate_so_path.empty()) {
if (tflite_settings.nnapi_settings) {
nnapi_sl_handle =
::tflite::nnapi::loadNnApiSupportLibrary(delegate_so_path);
if (!nnapi_sl_handle) {
return Validator::Status{kMiniBenchmarkCannotLoadSupportLibrary,
BenchmarkStage_INITIALIZATION};
}
copy.nnapi_settings->support_library_handle =
reinterpret_cast<uint64_t>(nnapi_sl_handle->getFL5());
} else if (tflite_settings.gpu_settings) {
copy.stable_delegate_loader_settings =
std::make_unique<StableDelegateLoaderSettingsT>();
copy.stable_delegate_loader_settings->delegate_path = delegate_so_path;
}
}
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(CreateComputeSettings(fbb, ExecutionPreference_ANY,
CreateTFLiteSettings(fbb, ©)));
std::unique_ptr<tools::ModelLoader> model_loader =
tools::CreateModelLoaderFromPath(model_path);
if (!model_loader) {
return Validator::Status{kMinibenchmarkPreconditionNotMet,
BenchmarkStage_INITIALIZATION};
}
auto validator = std::make_unique<Validator>(
std::move(model_loader),
flatbuffers::GetRoot<ComputeSettings>(fbb.GetBufferPointer()));
return validator->RunValidation(&results);
}
}
extern "C" {
int Java_org_tensorflow_lite_acceleration_validation_entrypoint(int argc,
char** argv) {
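  // argv layout: [0] unused, [1] binary name, [2] entry-point function name, [3] model path,
  // [4] storage path, [5] data directory path, [6] optional delegate support library path.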
if (argc < 6) return 1;
const std::string model_path = argv[3];
const std::string storage_path = argv[4];
const std::string nnapi_sl_path = argc > 6 ? argv[6] : "";
FileLock lock(storage_path + ".child_lock");
if (!lock.TryLock()) {
return kMinibenchmarkChildProcessAlreadyRunning;
}
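  // Report this process's PID to the parent over stdout as a fixed-width field.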
std::string pid = std::to_string(getpid());
pid.resize(kPidBufferLength);
if (write(1, pid.data(), kPidBufferLength) != kPidBufferLength) {
return kMinibenchmarkPreconditionNotMet;
}
FlatbufferStorage<BenchmarkEvent> storage(storage_path);
MinibenchmarkStatus read_status = storage.Read();
if (read_status != kMinibenchmarkSuccess) {
return read_status;
}
TFLiteSettingsT tflite_settings;
int32_t set_big_core_affinity_errno = SetBigCoresAffinity();
if (set_big_core_affinity_errno != 0) {
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_RECOVERED_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
set_big_core_affinity_errno,
0,
0,
kMinibenchmarkUnableToSetCpuAffinity),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
Validator::Status run_status =
Validator::Status{kMinibenchmarkNoValidationRequestFound};
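  // Find the most recent START event, run validation for its settings, and append an END event
  // with the results; on failure, fall through to append an ERROR event below.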
for (int i = storage.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage.Get(i);
if (event->event_type() == BenchmarkEventType_START) {
event->tflite_settings()->UnPackTo(&tflite_settings);
Validator::Results results;
run_status =
RunValidator(model_path, nnapi_sl_path, tflite_settings, results);
if (run_status.status != kMinibenchmarkSuccess) {
break;
}
flatbuffers::FlatBufferBuilder fbb;
std::vector<int64_t> delegate_prep_time_us{results.delegate_prep_time_us};
std::vector<Offset<tflite::BenchmarkMetric>> metrics;
metrics.reserve(results.metrics.size());
for (const auto& name_and_values : results.metrics) {
metrics.push_back(
CreateBenchmarkMetric(fbb, fbb.CreateString(name_and_values.first),
fbb.CreateVector(name_and_values.second)));
}
std::vector<Offset<BenchmarkResult_::InferenceOutput>> actual_output;
for (const auto& output : results.actual_inference_output) {
const uint8_t* output_uint8 =
reinterpret_cast<const uint8_t*>(output.data());
actual_output.push_back(BenchmarkResult_::CreateInferenceOutput(
fbb, fbb.CreateVector(output_uint8, output.size())));
}
return storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_END,
CreateBenchmarkResult(
fbb, fbb.CreateVector(delegate_prep_time_us),
fbb.CreateVector(results.execution_time_us), 0, results.ok,
fbb.CreateVector(metrics), fbb.CreateVector(actual_output)),
0, Validator::BootTimeMicros(),
Validator::WallTimeMicros()));
}
}
flatbuffers::FlatBufferBuilder fbb;
return storage.Append(
&fbb, CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, run_status.stage, 0,
0, 0,
run_status.status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
}
}
}
#endif | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.h"
#include <sys/types.h>
#include <unistd.h>
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
namespace tflite {
namespace acceleration {
static int32_t big_core_affinity_result;
int32_t SetBigCoresAffinity() { return big_core_affinity_result; }
namespace {
class ValidatorRunnerEntryPointTest : public ::testing::Test {
protected:
ValidatorRunnerEntryPointTest()
: storage_path_(::testing::TempDir() + "/events.fb"),
storage_(storage_path_) {}
std::vector<const tflite::BenchmarkEvent*> GetEvents() {
std::vector<const tflite::BenchmarkEvent*> result;
storage_.Read();
int storage_size = storage_.Count();
if (storage_size == 0) {
return result;
}
for (int i = 0; i < storage_size; i++) {
const ::tflite::BenchmarkEvent* event = storage_.Get(i);
result.push_back(event);
}
return result;
}
void ClearEvents() { (void)unlink(storage_path_.c_str()); }
void SetUp() override {
ClearEvents();
SetBigCoreAffinityReturns(0);
}
int CallEntryPoint(std::string cpu_affinity = "0") {
std::vector<std::string> args = {
"test",
"binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path",
storage_path_,
"data_dir"};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
return Java_org_tensorflow_lite_acceleration_validation_entrypoint(
argv.size(), argv.data());
}
void SetBigCoreAffinityReturns(int32_t value) {
big_core_affinity_result = value;
}
std::string storage_path_;
FlatbufferStorage<BenchmarkEvent> storage_;
};
TEST_F(ValidatorRunnerEntryPointTest, NotEnoughArguments) {
std::vector<std::string> args = {
"test", "binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path", storage_path_};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
EXPECT_EQ(1, Java_org_tensorflow_lite_acceleration_validation_entrypoint(
5, argv.data()));
}
TEST_F(ValidatorRunnerEntryPointTest, NoValidationRequestFound) {
EXPECT_EQ(kMinibenchmarkSuccess, CallEntryPoint());
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(1));
const tflite::BenchmarkEvent* event = events[0];
EXPECT_EQ(BenchmarkEventType_ERROR, event->event_type());
EXPECT_EQ(kMinibenchmarkNoValidationRequestFound,
event->error()->mini_benchmark_error_code());
}
TEST_F(ValidatorRunnerEntryPointTest, CannotSetCpuAffinity) {
SetBigCoreAffinityReturns(10);
EXPECT_EQ(kMinibenchmarkSuccess, CallEntryPoint("invalid_cpu_affinity"));
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(2));
const tflite::BenchmarkEvent* event = events[0];
EXPECT_EQ(BenchmarkEventType_RECOVERED_ERROR, event->event_type());
EXPECT_EQ(kMinibenchmarkUnableToSetCpuAffinity,
event->error()->mini_benchmark_error_code());
EXPECT_EQ(10, event->error()->exit_code());
}
TEST_F(ValidatorRunnerEntryPointTest, CannotLoadNnapi) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsT tflite_settings;
NNAPISettingsT nnapi_settings;
ASSERT_EQ(
storage_.Append(
&fbb,
CreateBenchmarkEvent(
fbb,
CreateTFLiteSettings(fbb, Delegate_NNAPI,
CreateNNAPISettings(fbb, &nnapi_settings)),
BenchmarkEventType_START, 0, 0,
Validator::BootTimeMicros(), Validator::WallTimeMicros())),
kMinibenchmarkSuccess);
std::vector<std::string> args = {
"test",
"binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path",
storage_path_,
"data_directory_path",
"nnapi_path"};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
EXPECT_EQ(kMinibenchmarkSuccess,
Java_org_tensorflow_lite_acceleration_validation_entrypoint(
7, argv.data()));
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(2));
const tflite::BenchmarkEvent* event = events[1];
EXPECT_EQ(BenchmarkEventType_ERROR, event->event_type());
EXPECT_EQ(kMiniBenchmarkCannotLoadSupportLibrary,
event->error()->mini_benchmark_error_code());
}
}
}
} |
739 | cpp | tensorflow/tensorflow | jpeg_header_parser | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_HEADER_PARSER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_HEADER_PARSER_H_
#include <string>
#include <tuple>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_common.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
Status ReadJpegHeader(const tflite::StringRef& jpeg_image_data,
JpegHeader* header);
Status BuildImageWithNewHeader(const tflite::StringRef& orig_jpeg_image_data,
const JpegHeader& new_header,
std::string& new_image_data);
}
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include <cstdint>
#include <memory>
#include <string>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using MarkerId = uint16_t;
void AsWord(int value, char* msb, char* lsb) {
*msb = static_cast<char>(value >> 8);
*lsb = static_cast<char>(value);
}
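// Minimal JFIF/JPEG header scanner: walks the marker stream to locate the Start-Of-Frame segment
// so image dimensions can be read or rewritten without decoding any pixel data.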
class JfifHeaderParser {
public:
explicit JfifHeaderParser(const tflite::StringRef& jpeg_image_data)
: jpeg_image_data_(jpeg_image_data), offset_(0) {
if (!IsJpegImage(jpeg_image_data_)) {
is_valid_image_buffer_ = false;
validation_error_message_ = "Not a valid JPEG image.";
} else if (!IsJfifImage(jpeg_image_data_)) {
is_valid_image_buffer_ = false;
validation_error_message_ = "Image is not in JFIF format.";
return;
} else {
is_valid_image_buffer_ = true;
}
}
#define ENSURE_READ_STATUS(a) \
do { \
const TfLiteStatus s = (a); \
if (s != kTfLiteOk) { \
return {s, "Error trying to parse JPEG header."}; \
} \
} while (0)
Status ReadJpegHeader(JpegHeader* result) {
if (!is_valid_image_buffer_) {
return {kTfLiteError, validation_error_message_};
}
Status move_to_sof_status = MoveToStartOfFrameMarker();
if (move_to_sof_status.code != kTfLiteOk) {
return move_to_sof_status;
}
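    // SOF payload layout: [length:2][bits per sample:1][height:2][width:2][number of components:1].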
ENSURE_READ_STATUS(SkipBytes(2));
char precision;
ENSURE_READ_STATUS(ReadByte(&precision));
uint16_t height;
ENSURE_READ_STATUS(ReadWord(&height));
uint16_t width;
ENSURE_READ_STATUS(ReadWord(&width));
char num_of_components;
ENSURE_READ_STATUS(ReadByte(&num_of_components));
if (num_of_components != 1 && num_of_components != 3) {
return {kTfLiteError,
"A JFIF image without App14 marker doesn't support a number of "
"components = " +
std::to_string(static_cast<int>(num_of_components))};
}
result->width = width;
result->height = height;
result->channels = num_of_components;
result->bits_per_sample = precision;
return {kTfLiteOk, ""};
}
Status ApplyHeaderToImage(const JpegHeader& new_header,
std::string& write_to) {
if (!is_valid_image_buffer_) {
return {kTfLiteError, validation_error_message_};
}
Status move_to_sof_status = MoveToStartOfFrameMarker();
if (move_to_sof_status.code != kTfLiteOk) {
return move_to_sof_status;
}
ENSURE_READ_STATUS(SkipBytes(2));
if (!HasData(6)) {
return {kTfLiteError,
"Invalid SOF marker, image buffer ends before end of marker"};
}
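    // Rebuild the six fixed SOF payload bytes from new_header and splice them into a copy of the
    // original byte stream.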
char header[6];
header[0] = static_cast<char>(new_header.bits_per_sample);
AsWord(new_header.height, header + 1, header + 2);
AsWord(new_header.width, header + 3, header + 4);
header[5] = static_cast<char>(new_header.channels);
write_to.clear();
write_to.append(jpeg_image_data_.str, offset_);
write_to.append(header, 6);
ENSURE_READ_STATUS(SkipBytes(6));
if (HasData()) {
write_to.append(jpeg_image_data_.str + offset_,
jpeg_image_data_.len - offset_);
}
return {kTfLiteOk, ""};
}
private:
const tflite::StringRef jpeg_image_data_;
int offset_;
bool is_valid_image_buffer_;
std::string validation_error_message_;
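  // Skips the SOI and APP0 markers, then advances marker-by-marker until a Start-Of-Frame marker
  // is found; reaching Start-Of-Scan first means no SOF header is present.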
Status MoveToStartOfFrameMarker() {
const MarkerId kStartOfStreamMarkerId = 0xFFDA;
offset_ = 0;
ENSURE_READ_STATUS(SkipBytes(4));
ENSURE_READ_STATUS(SkipCurrentMarker());
MarkerId curr_marker_id;
while (HasData(4)) {
ENSURE_READ_STATUS(ReadWord(&curr_marker_id));
if (IsStartOfFrameMarkerId(curr_marker_id)) {
break;
}
if (curr_marker_id == kStartOfStreamMarkerId) {
return {kTfLiteError, "Error trying to parse JPEG header."};
}
ENSURE_READ_STATUS(SkipCurrentMarker());
}
return {kTfLiteOk, ""};
}
#undef ENSURE_READ_STATUS
bool HasData(int min_data_size = 1) {
return offset_ <= jpeg_image_data_.len - min_data_size;
}
TfLiteStatus SkipBytes(int bytes) {
if (!HasData(bytes)) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Trying to move out of image boundaries from offset %d, "
"skipping %d bytes",
offset_, bytes);
return kTfLiteError;
}
offset_ += bytes;
return kTfLiteOk;
}
TfLiteStatus ReadByte(char* result) {
if (!HasData()) {
return kTfLiteError;
}
*result = jpeg_image_data_.str[offset_];
return SkipBytes(1);
}
TfLiteStatus ReadWord(uint16_t* result) {
TF_LITE_ENSURE_STATUS(ReadWordAt(jpeg_image_data_, offset_, result));
return SkipBytes(2);
}
TfLiteStatus SkipCurrentMarker() {
uint16_t full_marker_len;
TF_LITE_ENSURE_STATUS(ReadWord(&full_marker_len));
if (full_marker_len <= 2) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Invalid marker length %d read at offset %X", full_marker_len,
offset_);
return kTfLiteError;
}
return SkipBytes(full_marker_len - 2);
}
static TfLiteStatus ReadWordAt(const tflite::StringRef& jpeg_image_data,
int read_offset, uint16_t* result) {
if (read_offset < 0 || read_offset + 2 > jpeg_image_data.len) {
return kTfLiteError;
}
const unsigned char* buf =
reinterpret_cast<const unsigned char*>(jpeg_image_data.str);
*result = (buf[read_offset] << 8) + buf[read_offset + 1];
return kTfLiteOk;
}
static bool IsJpegImage(const tflite::StringRef& jpeg_image_data) {
const MarkerId kStartOfImageMarkerId = 0xFFD8;
const MarkerId kEndOfImageMarkerId = 0xFFD9;
MarkerId soi_marker_id;
MarkerId eoi_marker_id;
if (ReadWordAt(jpeg_image_data, 0, &soi_marker_id) != kTfLiteOk) {
return false;
}
if (ReadWordAt(jpeg_image_data, jpeg_image_data.len - 2, &eoi_marker_id) !=
kTfLiteOk) {
return false;
}
return (soi_marker_id == kStartOfImageMarkerId) &&
(eoi_marker_id == kEndOfImageMarkerId);
}
static bool IsJfifImage(const tflite::StringRef& jpeg_image_data) {
const MarkerId kApp0MarkerId = 0xFFE0;
MarkerId app_marker_id;
if ((ReadWordAt(jpeg_image_data, 2, &app_marker_id) != kTfLiteOk) ||
(app_marker_id != kApp0MarkerId)) {
return false;
}
const std::string kJfifIdString{"JFIF\0", 5};
const int KJfifIdStringStartOffset = 6;
if (KJfifIdStringStartOffset + kJfifIdString.size() >=
jpeg_image_data.len) {
      TFLITE_LOG(TFLITE_LOG_WARNING,
                 "Invalid image, reached end of data while "
                 "parsing APP0 header");
return false;
}
const std::string actualImgId(
jpeg_image_data.str + KJfifIdStringStartOffset, kJfifIdString.size());
if (kJfifIdString != actualImgId) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Invalid image, invalid APP0 header");
return false;
}
return true;
}
static bool IsStartOfFrameMarkerId(MarkerId marker_id) {
return 0xFFC0 <= marker_id && marker_id < 0xFFCF;
}
};
}
Status ReadJpegHeader(const tflite::StringRef& jpeg_image_data,
JpegHeader* header) {
JfifHeaderParser parser(jpeg_image_data);
return parser.ReadJpegHeader(header);
}
Status BuildImageWithNewHeader(const tflite::StringRef& orig_jpeg_image_data,
const JpegHeader& new_header,
std::string& new_image_data) {
JfifHeaderParser parser(orig_jpeg_image_data);
return parser.ApplyHeaderToImage(new_header, new_image_data);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
void PrintTo(const Status& status, std::ostream* os) {
*os << "{ code: " + std::to_string(status.code) + ", error_message: '" +
status.error_message + "'}";
}
}
}
}
namespace {
using ::testing::AllOf;
using ::testing::Eq;
using ::testing::Field;
using ::testing::Matcher;
using tflite::acceleration::decode_jpeg_kernel::JpegHeader;
using tflite::acceleration::decode_jpeg_kernel::ReadJpegHeader;
Matcher<JpegHeader> JpegHeaderEq(const JpegHeader& expected) {
return AllOf(
Field(&JpegHeader::channels, Eq(expected.channels)),
Field(&JpegHeader::height, Eq(expected.height)),
Field(&JpegHeader::width, Eq(expected.width)),
Field(&JpegHeader::bits_per_sample, Eq(expected.bits_per_sample)));
}
using tflite::acceleration::decode_jpeg_kernel::Status;
Matcher<Status> StatusEq(const Status& expected) {
return AllOf(Field(&Status::code, Eq(expected.code)),
Field(&Status::error_message, Eq(expected.error_message)));
}
const int kChessboardImgHeight = 300;
const int kChessboardImgWidth = 250;
const int kChessboardImgChannels = 3;
TEST(ReadJpegHeader, ShouldParseValidJpgImage) {
const tflite::StringRef chessboard_image{
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
ASSERT_GT(chessboard_image.len, 4);
JpegHeader header;
ASSERT_THAT(ReadJpegHeader(chessboard_image, &header),
StatusEq({kTfLiteOk, ""}));
EXPECT_THAT(header, JpegHeaderEq({kChessboardImgHeight, kChessboardImgWidth,
kChessboardImgChannels}));
}
TEST(ReadJpegHeader, ShouldFailForInvalidJpegImage) {
const std::string invalid_image = "invalid image content";
const tflite::StringRef invalid_image_ref{invalid_image.c_str(),
invalid_image.size()};
JpegHeader header;
EXPECT_THAT(ReadJpegHeader(invalid_image_ref, &header),
StatusEq({kTfLiteError, "Not a valid JPEG image."}));
}
TEST(ReadJpegHeader, ShouldFailForEmptyJpegImage) {
const tflite::StringRef invalid_image_ref{"", 0};
JpegHeader header;
EXPECT_THAT(ReadJpegHeader(invalid_image_ref, &header),
StatusEq({kTfLiteError, "Not a valid JPEG image."}));
}
TEST(ApplyHeaderToImage, ReturnsNewImageWithDifferentHeader) {
const tflite::StringRef chessboard_image{
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
JpegHeader new_header{
.height = 20, .width = 30, .channels = 1, .bits_per_sample = 3};
std::string new_image_data;
ASSERT_THAT(
BuildImageWithNewHeader(chessboard_image, new_header, new_image_data),
StatusEq({kTfLiteOk, ""}));
const tflite::StringRef altered_image{new_image_data.c_str(),
new_image_data.size()};
JpegHeader header;
ASSERT_THAT(ReadJpegHeader(altered_image, &header),
StatusEq({kTfLiteOk, ""}));
EXPECT_THAT(header, JpegHeaderEq(new_header));
}
} |
740 | cpp | tensorflow/tensorflow | validator | tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_H_
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
class Validator {
public:
Validator(std::unique_ptr<tools::ModelLoader> model_loader,
const ComputeSettings* compute_settings)
: model_loader_(std::move(model_loader)),
compute_settings_(compute_settings) {}
struct Results {
bool ok = false;
std::map<std::string, std::vector<float>> metrics;
int64_t delegate_prep_time_us = 0;
std::vector<int64_t> execution_time_us;
int delegate_error = 0;
int delegated_kernels = 0;
std::vector<std::vector<char>> actual_inference_output;
};
struct Status {
MinibenchmarkStatus status;
BenchmarkStage stage = BenchmarkStage_UNKNOWN;
};
Status RunValidation(Results* results_out);
static int64_t BootTimeMicros();
static int64_t WallTimeMicros();
Validator(Validator&) = delete;
Validator& operator=(Validator&) = delete;
Validator(Validator&&) = delete;
Validator& operator=(Validator&&) = delete;
private:
using TfLiteOpaqueDelegatePtr =
std::unique_ptr<TfLiteOpaqueDelegateStruct,
void (*)(TfLiteOpaqueDelegateStruct*)>;
MinibenchmarkStatus LoadDelegate();
MinibenchmarkStatus LoadOpaqueDelegate();
MinibenchmarkStatus CreateInterpreter(int* delegate_error_out,
int* delegated_kernels_out);
MinibenchmarkStatus CheckGoldenOutput(Results* results_out);
std::unique_ptr<tools::ModelLoader> model_loader_;
const ComputeSettings* compute_settings_;
std::unique_ptr<Interpreter> golden_interpreter_;
std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<::tflite::MutableOpResolver> resolver_;
std::unique_ptr<FlatBufferModel> model_;
::tflite::delegates::TfLiteDelegatePtr delegate_ =
delegates::TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
TfLiteOpaqueDelegatePtr opaque_delegate_ =
TfLiteOpaqueDelegatePtr(nullptr, [](TfLiteOpaqueDelegate*) {});
std::unique_ptr<tflite::delegates::DelegatePluginInterface> delegate_plugin_;
int validation_entrypoint_index_ = -1;
Subgraph* validation_entrypoint_ = nullptr;
Subgraph* main_model_ = nullptr;
bool has_accuracy_validation_ = false;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/tools/benchmark/register_custom_op.h"
#include "tensorflow/lite/tools/model_loader.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
namespace {
std::unique_ptr<tflite::delegates::DelegatePluginInterface> LoadDelegatePlugin(
const std::string& name, const tflite::TFLiteSettings& tflite_settings) {
return tflite::delegates::DelegatePluginRegistry::CreateByName(
name + "Plugin", tflite_settings);
}
void AppendTensorDataToVector(const TfLiteTensor* tensor,
std::vector<std::vector<char>>& output_vector) {
std::vector<char> char_output(TfLiteTensorByteSize(tensor));
memcpy(char_output.data(), TfLiteTensorData(tensor),
TfLiteTensorByteSize(tensor));
output_vector.emplace_back(std::move(char_output));
}
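// A tensor is considered to have data when it is backed by an allocation in the model, or,
// for pipe-loaded models, when its buffer has already been filled.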
inline bool HasTensorData(tools::ModelLoader* model_loader,
const Subgraph& graph, int index) {
const TfLiteTensor* tensor = graph.tensor(index);
return tensor->allocation != nullptr ||
(model_loader->type() == tools::ModelLoader::Type::kPipeModelLoader &&
tensor->data.data != nullptr);
}
constexpr int64_t kMicrosInSecond = 1000 * 1000;
constexpr int64_t kNanosInMicro = 1000;
int64_t ElapsedTimeMicros() {
struct timespec ts;
#if defined(__ANDROID__)
int err = clock_gettime(CLOCK_BOOTTIME, &ts);
#elif defined(_WIN32)
int err = 1;
#else
int err = clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
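// Profiler that records wall-clock start/end times of DEFAULT events (such as Invoke) so that
// per-inference latency can be reported.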
class ValidatorProfiler : public ::tflite::Profiler {
public:
struct EventData {
std::string tag;
int64_t start_time_us = -1;
int64_t end_time_us = -1;
};
const std::vector<EventData>& events() { return events_; }
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (event_type != EventType::DEFAULT) {
return 0;
}
events_.push_back({tag, ElapsedTimeMicros(), -1});
return events_.size();
}
void EndEvent(uint32_t event_handle) override {
if (event_handle == 0) {
return;
}
events_[event_handle - 1].end_time_us = ElapsedTimeMicros();
}
private:
std::vector<EventData> events_;
};
}
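// If the validation subgraph has no embedded golden inputs, run the model on CPU with zero-filled
// inputs and copy the CPU outputs back in as the golden reference.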
MinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) {
if (!interpreter_ || !model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (validation_entrypoint_->inputs().size() <= 1) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (validation_entrypoint_->inputs().size() >
validation_entrypoint_->outputs().size()) {
return kMinibenchmarkValidationSubgraphHasTooFewOutputs;
}
if (HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs()[0])) {
return kMinibenchmarkSuccess;
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Running on CPU to get golden output for comparison.");
tflite::InterpreterBuilder(*model_loader_->GetModel(),
*resolver_)(&golden_interpreter_);
if (!golden_interpreter_) {
return kMinibenchmarkInterpreterBuilderFailed;
}
Subgraph* golden_validation_entrypoint =
golden_interpreter_->subgraph(validation_entrypoint_index_);
if (golden_validation_entrypoint->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->inputs()[i]);
memset(input_tensor->data.data, 0, input_tensor->bytes);
}
if (golden_validation_entrypoint->Invoke() != kTfLiteOk) {
return kMinibenchmarkInvokeFailed;
}
for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor =
validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]);
TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->outputs()[i]);
if (input_tensor->bytes != golden_output_tensor->bytes) {
return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs;
}
memcpy(input_tensor->data.data, golden_output_tensor->data.data,
golden_output_tensor->bytes);
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::LoadDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
if (opaque_delegate_) {
return kMinibenchmarkSuccess;
}
Delegate which_delegate = Delegate_NONE;
bool is_stable_delegate_path_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (tflite_settings) {
which_delegate = compute_settings_->tflite_settings()->delegate();
if (tflite_settings->stable_delegate_loader_settings()) {
is_stable_delegate_path_provided =
tflite_settings->stable_delegate_loader_settings()->delegate_path() &&
!tflite_settings->stable_delegate_loader_settings()
->delegate_path()
->str()
.empty();
}
}
std::string delegate_name;
if (is_stable_delegate_path_provided && which_delegate == Delegate_GPU) {
delegate_name = "GpuModule";
} else if (is_stable_delegate_path_provided) {
delegate_name = "StableDelegate";
} else {
switch (which_delegate) {
case Delegate_NONE:
return kMinibenchmarkSuccess;
case Delegate_NNAPI:
delegate_name = "Nnapi";
break;
case Delegate_GPU:
delegate_name = "Gpu";
break;
case Delegate_XNNPACK:
delegate_name = "XNNPack";
break;
case Delegate_EDGETPU:
delegate_name = "EdgeTpu";
break;
default:
return kMinibenchmarkDelegateNotSupported;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
if (!(delegate_plugin_ = LoadDelegatePlugin(
delegate_name, *compute_settings_->tflite_settings()))) {
return kMinibenchmarkDelegatePluginNotFound;
}
if (!(delegate_ = delegate_plugin_->Create())) {
return kMinibenchmarkDelegateCreateFailed;
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::LoadOpaqueDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
bool is_stable_delegate_name_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (!tflite_settings) {
return kMinibenchmarkSuccess;
}
auto stable_delegate_settings =
tflite_settings->stable_delegate_loader_settings();
is_stable_delegate_name_provided =
stable_delegate_settings && stable_delegate_settings->delegate_name() &&
!stable_delegate_settings->delegate_name()->str().empty();
if (!is_stable_delegate_name_provided) {
return kMinibenchmarkSuccess;
}
std::string delegate_name = stable_delegate_settings->delegate_name()->str();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
const TfLiteStableDelegate* stable_delegate =
delegates::StableDelegateRegistry::RetrieveStableDelegate(delegate_name);
if (!stable_delegate) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to load stable delegate plugin %s",
delegate_name.c_str());
return kMinibenchmarkDelegatePluginNotFound;
}
const TfLiteOpaqueDelegatePlugin* delegate_plugin =
stable_delegate->delegate_plugin;
opaque_delegate_ = TfLiteOpaqueDelegatePtr(
delegate_plugin->create(tflite_settings), delegate_plugin->destroy);
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::CreateInterpreter(int* delegate_error_out,
int* delegated_kernels_out) {
if (!delegate_error_out || !delegated_kernels_out ||
!model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (interpreter_) {
return kMinibenchmarkSuccess;
}
*delegate_error_out = 0;
if (compute_settings_->tflite_settings() &&
compute_settings_->tflite_settings()->disable_default_delegates()) {
resolver_ = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
} else {
resolver_ = std::make_unique<::tflite::ops::builtin::BuiltinOpResolver>();
}
resolver_->AddCustom("validation/call",
::tflite::acceleration::ops::Register_CALL(), 1);
resolver_->AddCustom(
"validation/decode_jpeg",
::tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG(), 1);
RegisterSelectedOps(resolver_.get());
tflite::InterpreterBuilder builder(*model_loader_->GetModel(), *resolver_);
if (delegate_ != nullptr) {
builder.AddDelegate(delegate_.get());
}
if (opaque_delegate_ != nullptr) {
builder.AddDelegate(opaque_delegate_.get());
}
TfLiteStatus status = builder(&interpreter_);
if (!interpreter_) {
*delegate_error_out =
delegate_plugin_ ? delegate_plugin_->GetDelegateErrno(delegate_.get())
: 0;
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Creating Interpreter failed with error code %d.", status);
return kMinibenchmarkInterpreterBuilderFailed;
}
main_model_ = interpreter_->subgraph(0);
validation_entrypoint_index_ = -1;
for (int i = 0; i < interpreter_->subgraphs_size(); i++) {
Subgraph* subgraph = interpreter_->subgraph(i);
if (subgraph->GetName() == kValidationGraphName) {
validation_entrypoint_index_ = i;
validation_entrypoint_ = subgraph;
} else if (subgraph->GetName() == "VALIDATION:metrics") {
has_accuracy_validation_ = true;
}
}
if (!validation_entrypoint_) {
return kMinibenchmarkValidationSubgraphNotFound;
}
if (validation_entrypoint_->inputs().empty()) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (!HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs().back())) {
return kMinibenchmarkValidationInputMissing;
}
if (validation_entrypoint_->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
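  // Count execution-plan nodes claimed by the delegate; full delegation leaves exactly one
  // delegate node in the plan.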
absl::flat_hash_set<int> checked_node_ids;
int num_delegated_kernels = 0;
for (int i = 0; i < interpreter_->execution_plan().size(); ++i) {
int node_id = interpreter_->execution_plan()[i];
if (checked_node_ids.find(node_id) != checked_node_ids.end()) {
continue;
}
const TfLiteNode& node =
interpreter_->node_and_registration(node_id)->first;
if (node.delegate != nullptr) {
num_delegated_kernels++;
checked_node_ids.insert(node_id);
}
}
*delegated_kernels_out = num_delegated_kernels;
bool fully_delegated = (num_delegated_kernels == 1 &&
interpreter_->execution_plan().size() == 1);
if (!fully_delegated) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"The model will be %s executed by the delegate.",
num_delegated_kernels > 0 ? "partially" : "not");
}
return kMinibenchmarkSuccess;
}
Validator::Status Validator::RunValidation(Results* results_out) {
BenchmarkStage stage = BenchmarkStage_INITIALIZATION;
if (!results_out) {
return Validator::Status{kMinibenchmarkPreconditionNotMet, stage};
}
if (!model_loader_) {
return Validator::Status{kMinibenchmarkModelReadFailed, stage};
}
if (!model_loader_->Init()) {
return Validator::Status{kMinibenchmarkModelInitFailed, stage};
}
#define MB_RETURN_IF_ERROR(s, bs) \
{ \
MinibenchmarkStatus c = (s); \
if (c != kMinibenchmarkSuccess) return Validator::Status{c, (bs)}; \
}
int64_t delegate_load_start_time_us = ElapsedTimeMicros();
MB_RETURN_IF_ERROR(LoadOpaqueDelegate(), stage);
MB_RETURN_IF_ERROR(LoadDelegate(), stage);
MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error,
&results_out->delegated_kernels),
stage);
int64_t delegate_load_end_time_us = ElapsedTimeMicros();
ValidatorProfiler profiler;
stage = BenchmarkStage_INFERENCE;
if (has_accuracy_validation_) {
MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out), stage);
}
main_model_->SetProfiler(&profiler, 0);
TfLiteStatus status = validation_entrypoint_->Invoke();
main_model_->SetProfiler(nullptr, 0);
if (status != kTfLiteOk) {
MB_RETURN_IF_ERROR(kMinibenchmarkInvokeFailed, stage);
}
int model_output_size = main_model_->outputs().size();
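  // Outputs beyond the main model's own outputs carry accuracy metrics named "metrics/<name>";
  // "metrics/ok" holds the overall pass/fail flag.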
if (has_accuracy_validation_) {
const std::string kMetricPrefix = "metrics/";
const std::string kOk("ok");
for (int i = model_output_size;
i < validation_entrypoint_->outputs().size(); i++) {
TfLiteTensor* tensor =
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]);
std::string name = tensor->name;
if (name.find(kMetricPrefix) != 0) {
continue;
}
name = name.substr(kMetricPrefix.size());
if (kOk == name) {
results_out->ok = *(tensor->data.b);
} else {
std::vector<float> values;
int count = 1;
for (int j = 0; j < tensor->dims->size; j++) {
count *= tensor->dims->data[j];
}
values.reserve(count);
for (int j = 0; j < count; j++) {
values.push_back(tensor->data.f[j]);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " %s %.4f", name.c_str(),
tensor->data.f[j]);
}
results_out->metrics[name] = values;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " accuracy: %s",
results_out->ok ? "ok" : "not ok");
} else {
results_out->actual_inference_output.clear();
results_out->actual_inference_output.reserve(model_output_size);
for (int i = 0; i < model_output_size; i++) {
AppendTensorDataToVector(
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]),
results_out->actual_inference_output);
}
}
results_out->delegate_prep_time_us =
(delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1)
? -1
: delegate_load_end_time_us - delegate_load_start_time_us;
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Delegate preparation took %d us",
static_cast<int>(results_out->delegate_prep_time_us));
for (const auto& e : profiler.events()) {
if (e.tag == "Invoke" && e.start_time_us != -1 && e.end_time_us != -1) {
results_out->execution_time_us.push_back(e.end_time_us - e.start_time_us);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Inference took %d us",
static_cast<int>(e.end_time_us - e.start_time_us));
}
}
#undef MB_RETURN_IF_ERROR
return Validator::Status{kMinibenchmarkSuccess};
}
int64_t Validator::BootTimeMicros() { return ElapsedTimeMicros(); }
int64_t Validator::WallTimeMicros() {
struct timespec ts;
#ifndef _WIN32
int err = clock_gettime(CLOCK_REALTIME, &ts);
#else
int err = 1;
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using flatbuffers::FlatBufferBuilder;
constexpr int kOutputTensorSize = 1001;
class ValidatorTest : public ::testing::Test {
protected:
void SetUp() override {
std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!validation_model_path.empty());
validation_model_loader_ =
std::make_unique<tools::PathModelLoader>(validation_model_path);
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_));
default_compute_settings_ = flatbuffers::GetRoot<ComputeSettings>(
compute_settings_fbb_.GetBufferPointer());
}
std::unique_ptr<tools::ModelLoader> validation_model_loader_;
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
FlatBufferBuilder compute_settings_fbb_;
const ComputeSettings* default_compute_settings_;
};
TEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) {
ASSERT_TRUE(validation_model_loader_->Init());
Validator validator(std::move(validation_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_TRUE(results.ok);
EXPECT_GE(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_TRUE(results.actual_inference_output.empty());
}
TEST_F(ValidatorTest, HappyPathOnCpuWithCustomValidation) {
ASSERT_TRUE(plain_model_loader_->Init());
ASSERT_TRUE(validation_model_loader_->Init());
const SubGraph* main_model =
plain_model_loader_->GetModel()->GetModel()->subgraphs()->Get(0);
const int model_output_size = main_model->outputs()->size();
int model_input_byte_size = 1;
for (int shape_i :
*main_model->tensors()->Get(main_model->inputs()->Get(0))->shape()) {
model_input_byte_size *= shape_i;
}
int batch_size = 5;
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(
batch_size,
{std::vector<uint8_t>(batch_size * model_input_byte_size, 1)});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string serialized_str(
reinterpret_cast<const char*>(model_with_input.GetBufferPointer()),
model_with_input.GetSize());
#if FLATBUFFERS_LITTLEENDIAN == 0
tflite::FlatBufferModel::ByteSwapSerializedModel(&serialized_str, true);
#endif
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite",
reinterpret_cast<const unsigned char*>(serialized_str.c_str()),
serialized_str.size());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_FALSE(results.ok);
EXPECT_EQ(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_EQ(results.actual_inference_output.size(), model_output_size);
EXPECT_EQ(results.actual_inference_output[0].size(),
batch_size * kOutputTensorSize);
}
TEST_F(ValidatorTest, DelegateNotSupported) {
proto::ComputeSettings settings_proto;
settings_proto.mutable_tflite_settings()->set_delegate(proto::CORE_ML);
flatbuffers::FlatBufferBuilder fbb;
const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb);
Validator validator(std::move(validation_model_loader_), settings);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkDelegateNotSupported);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationSubgraph) {
Validator validator(std::move(plain_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationSubgraphNotFound);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationInputData) {
ASSERT_TRUE(plain_model_loader_->Init());
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(1, {{}});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite", model_with_input.GetBufferPointer(),
model_with_input.GetSize());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationInputMissing);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, InvalidModel) {
const std::string dump_path = MiniBenchmarkTestHelper::DumpToTempFile(
"foo.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len - 12000);
ASSERT_TRUE(!dump_path.empty());
Validator validator(std::make_unique<tools::PathModelLoader>(dump_path),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelInitFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, EmptyModelLoader) {
Validator validator(nullptr, default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelReadFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
}
}
} |
741 | cpp | tensorflow/tensorflow | validator_runner | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_VALIDATOR_RUNNER_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
namespace tflite {
namespace acceleration {
class ValidatorRunner {
public:
static constexpr int64_t kDefaultEventTimeoutUs = 30 * 1000 * 1000;
explicit ValidatorRunner(const ValidatorRunnerOptions& options);
MinibenchmarkStatus Init();
int TriggerMissingValidation(
const std::vector<const TFLiteSettings*>& for_settings);
std::vector<const BenchmarkEvent*> GetSuccessfulResults() {
return validator_runner_impl_->GetSuccessfulResultsFromStorage();
}
int GetNumCompletedResults() {
return validator_runner_impl_->GetNumCompletedResults();
}
std::vector<const BenchmarkEvent*> GetAndFlushEventsToLog(
int64_t timeout_us = kDefaultEventTimeoutUs);
private:
const std::string storage_path_;
FlatbufferStorage<BenchmarkEvent> storage_;
ErrorReporter* error_reporter_;
bool triggered_ = false;
std::unique_ptr<ValidatorRunnerImpl> validator_runner_impl_;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace acceleration {
constexpr int kMaxAttempts = 2;
ValidatorRunner::ValidatorRunner(const ValidatorRunnerOptions& options)
: storage_path_(options.storage_path),
storage_(options.storage_path, options.error_reporter),
error_reporter_(options.error_reporter) {
validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>(
CreateModelLoaderPath(options), options.storage_path,
options.data_directory_path, options.per_test_timeout_ms,
options.custom_input_data.empty()
? nullptr
: std::make_unique<CustomValidationEmbedder>(
options.custom_input_batch_size, options.custom_input_data,
options.error_reporter),
error_reporter_, options.nnapi_sl, options.gpu_plugin_handle,
options.validation_entrypoint_name, options.benchmark_result_evaluator);
}
MinibenchmarkStatus ValidatorRunner::Init() {
MinibenchmarkStatus status = storage_.Read();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed");
return status;
}
return validator_runner_impl_->Init();
}
int ValidatorRunner::TriggerMissingValidation(
const std::vector<const TFLiteSettings*>& for_settings) {
if (triggered_) {
return 0;
}
triggered_ = true;
storage_.Read();
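  // For every requested setting, count prior START/END events recorded in
  // storage; only settings with no results yet and fewer than kMaxAttempts
  // started runs are scheduled again.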
std::vector<flatbuffers::FlatBufferBuilder> to_be_run;
for (auto settings : for_settings) {
TFLiteSettingsT tflite_settings;
settings->UnPackTo(&tflite_settings);
int started_count = 0;
int results_count = 0;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() == BenchmarkEventType_LOGGED) {
continue;
}
if (!event->tflite_settings()) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Got previous event %d type %d with no tflite settings",
i, static_cast<int>(event->event_type()));
continue;
}
TFLiteSettingsT event_settings;
event->tflite_settings()->UnPackTo(&event_settings);
if (event_settings != tflite_settings) {
continue;
}
if (event->event_type() == BenchmarkEventType_START) {
started_count++;
} else if (event->event_type() == BenchmarkEventType_END) {
results_count++;
}
}
if (results_count > 0 || started_count >= kMaxAttempts) {
continue;
}
flatbuffers::FlatBufferBuilder copy;
copy.Finish(CreateTFLiteSettings(copy, &tflite_settings));
to_be_run.emplace_back(std::move(copy));
}
int to_be_run_count = to_be_run.size();
validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run),
storage_path_);
return to_be_run_count;
}
std::vector<const BenchmarkEvent*> ValidatorRunner::GetAndFlushEventsToLog(
int64_t timeout_us) {
std::vector<const BenchmarkEvent*> events;
storage_.Read();
if (storage_.Count() == 0) {
return events;
}
const BenchmarkEvent* last = storage_.Get(storage_.Count() - 1);
if (!last || last->event_type() == BenchmarkEventType_LOGGED) {
return events;
}
bool has_pending_event = false;
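  // Walk events from newest to oldest: a START without a later END/ERROR that
  // is younger than timeout_us means a run may still be in progress, so
  // postpone flushing.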
for (int i = storage_.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage_.Get(i);
if (!event || event->event_type() == BenchmarkEventType_LOGGED) {
break;
} else if (event->event_type() == BenchmarkEventType_END ||
event->event_type() == BenchmarkEventType_ERROR) {
break;
} else if (event->event_type() == BenchmarkEventType_START &&
std::abs(event->boottime_us() - Validator::BootTimeMicros()) <
timeout_us) {
has_pending_event = true;
}
}
if (has_pending_event) {
return events;
}
flatbuffers::FlatBufferBuilder fbb;
int64_t boottime_us = Validator::BootTimeMicros();
storage_.Append(
&fbb, CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_LOGGED,
0, 0, boottime_us,
Validator::WallTimeMicros()));
storage_.Read();
bool seen_end = false;
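  // Collect all events since the previous LOGGED marker, newest first:
  // terminal END/ERROR/RECOVERED_ERROR events are always reported, and a
  // START is reported only when no terminal event followed it.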
for (int i = storage_.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage_.Get(i);
if (!event || (event->event_type() == BenchmarkEventType_LOGGED &&
event->boottime_us() != boottime_us)) {
break;
}
if (event->event_type() == BenchmarkEventType_END ||
event->event_type() == BenchmarkEventType_ERROR ||
event->event_type() == BenchmarkEventType_RECOVERED_ERROR) {
events.push_back(event);
seen_end = true;
} else if (event->event_type() == BenchmarkEventType_START) {
if (!seen_end) {
events.push_back(event);
} else {
seen_end = false;
}
}
}
return events;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.h"
#include <fcntl.h>
#include <sys/stat.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <fstream>
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
std::vector<const TFLiteSettings*> BuildBenchmarkSettings(
const AndroidInfo& android_info, flatbuffers::FlatBufferBuilder& fbb_cpu,
flatbuffers::FlatBufferBuilder& fbb_nnapi,
flatbuffers::FlatBufferBuilder& fbb_gpu,
bool ignore_android_version = false) {
std::vector<const TFLiteSettings*> settings;
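  // CPU settings are always benchmarked; NNAPI is added only when the Android
  // SDK version is at least 28 (or ignore_android_version is set), and GPU
  // only on Android builds.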
fbb_cpu.Finish(CreateTFLiteSettings(fbb_cpu, Delegate_NONE,
CreateNNAPISettings(fbb_cpu)));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_cpu.GetBufferPointer()));
if (ignore_android_version || android_info.android_sdk_version >= "28") {
fbb_nnapi.Finish(CreateTFLiteSettings(fbb_nnapi, Delegate_NNAPI,
CreateNNAPISettings(fbb_nnapi)));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_nnapi.GetBufferPointer()));
}
#ifdef __ANDROID__
fbb_gpu.Finish(CreateTFLiteSettings(fbb_gpu, Delegate_GPU));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_gpu.GetBufferPointer()));
#endif
return settings;
}
std::string GetTargetDeviceName(const BenchmarkEvent* event) {
if (event->tflite_settings()->delegate() == Delegate_GPU) {
return "GPU";
} else if (event->tflite_settings()->delegate() == Delegate_NNAPI) {
return "NNAPI";
}
return "CPU";
}
class ValidatorRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (!should_perform_test_) {
return;
}
model_path_ = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!model_path_.empty());
}
void CheckConfigurations(bool use_path = true) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
ValidatorRunnerOptions options;
options.data_directory_path = ::testing::TempDir();
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
if (use_path) {
options.model_path = model_path_;
} else {
options.model_fd = open(model_path_.c_str(), O_RDONLY);
ASSERT_GE(options.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options.model_fd, &stat_buf), 0);
options.model_size = stat_buf.st_size;
options.model_offset = 0;
}
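    // Two runners share the same storage file; the second must observe the
    // results written by the first and therefore trigger no additional runs.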
auto validator1 = std::make_unique<ValidatorRunner>(options);
auto validator2 = std::make_unique<ValidatorRunner>(options);
ASSERT_EQ(validator1->Init(), kMinibenchmarkSuccess);
ASSERT_EQ(validator2->Init(), kMinibenchmarkSuccess);
std::vector<const BenchmarkEvent*> events =
validator1->GetAndFlushEventsToLog();
ASSERT_TRUE(events.empty());
flatbuffers::FlatBufferBuilder fbb_cpu, fbb_nnapi, fbb_gpu;
std::vector<const TFLiteSettings*> settings =
BuildBenchmarkSettings(android_info, fbb_cpu, fbb_nnapi, fbb_gpu);
ASSERT_EQ(validator1->TriggerMissingValidation(settings), settings.size());
int event_count = 0;
while (event_count < settings.size()) {
events = validator1->GetAndFlushEventsToLog();
event_count += events.size();
for (const BenchmarkEvent* event : events) {
std::string delegate_name = GetTargetDeviceName(event);
if (event->event_type() == BenchmarkEventType_END) {
if (event->result()->ok()) {
std::cout << "Validation passed on " << delegate_name << std::endl;
} else {
std::cout << "Validation did not pass on " << delegate_name
<< std::endl;
}
} else if (event->event_type() == BenchmarkEventType_ERROR) {
std::cout << "Failed to run validation on " << delegate_name
<< std::endl;
}
}
#ifndef _WIN32
sleep(1);
#endif
}
EXPECT_EQ(validator2->TriggerMissingValidation(settings), 0);
}
bool should_perform_test_ = true;
std::string model_path_;
};
TEST_F(ValidatorRunnerTest, AllConfigurationsWithFilePath) {
CheckConfigurations(true);
}
TEST_F(ValidatorRunnerTest, AllConfigurationsWithFd) {
CheckConfigurations(false);
}
using ::tflite::nnapi::NnApiSupportLibrary;
std::unique_ptr<const NnApiSupportLibrary> LoadNnApiSupportLibrary() {
MiniBenchmarkTestHelper helper;
std::string nnapi_sl_path = helper.DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
std::unique_ptr<const NnApiSupportLibrary> nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_path);
return nnapi_sl;
}
TEST_F(ValidatorRunnerTest, ShouldUseNnApiSl) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const NnApiSupportLibrary> nnapi_sl =
LoadNnApiSupportLibrary();
ASSERT_THAT(nnapi_sl.get(), ::testing::NotNull());
ValidatorRunnerOptions options;
options.model_path = model_path_;
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
options.data_directory_path = ::testing::TempDir();
options.nnapi_sl = nnapi_sl->getFL5();
ValidatorRunner validator(options);
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<const BenchmarkEvent*> events =
validator.GetAndFlushEventsToLog();
ASSERT_TRUE(events.empty());
flatbuffers::FlatBufferBuilder fbb_cpu, fbb_nnapi, fbb_gpu;
std::vector<const TFLiteSettings*> settings =
BuildBenchmarkSettings(android_info, fbb_cpu, fbb_nnapi, fbb_gpu,
true);
ASSERT_EQ(validator.TriggerMissingValidation(settings), settings.size());
int event_count = 0;
while (event_count < settings.size()) {
events = validator.GetAndFlushEventsToLog();
event_count += events.size();
#ifndef _WIN32
sleep(1);
#endif
}
ASSERT_EQ(validator.TriggerMissingValidation(settings), 0);
EXPECT_TRUE(WasNnApiSlInvoked());
}
TEST_F(ValidatorRunnerTest, ShouldFailIfItCannotFindNnApiSlPath) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
std::string storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(storage_path.c_str());
NnApiSLDriverImplFL5 wrong_handle_nnapi_sl{};
ValidatorRunnerOptions options;
options.model_path = model_path_;
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
options.data_directory_path = ::testing::TempDir();
options.nnapi_sl = &wrong_handle_nnapi_sl;
ValidatorRunner validator(options);
ASSERT_EQ(validator.Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
}
}
} |
742 | cpp | tensorflow/tensorflow | custom_validation_embedder | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_MODEL_MODIFIER_CUSTOM_VALIDATION_EMBEDDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_MODEL_MODIFIER_CUSTOM_VALIDATION_EMBEDDER_H_
#include <utility>
#include <vector>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
namespace acceleration {
class CustomValidationEmbedder {
public:
CustomValidationEmbedder(
int batch_size, std::vector<std::vector<uint8_t>> custom_input,
ErrorReporter* error_reporter = DefaultErrorReporter())
: batch_size_(batch_size),
custom_input_(std::move(custom_input)),
error_reporter_(error_reporter) {}
CustomValidationEmbedder(CustomValidationEmbedder&&) = default;
CustomValidationEmbedder& operator=(CustomValidationEmbedder&&) = default;
MinibenchmarkStatus BuildModel(const Model& main_model,
flatbuffers::FlatBufferBuilder& fbb);
private:
void CreateTensorsFrom(const SubGraph& from_subgraph,
const std::vector<int>& from_indexes,
std::vector<std::vector<uint8_t>>* buffer_content,
flatbuffers::FlatBufferBuilder& fbb,
std::vector<int>& new_indexes,
std::vector<flatbuffers::Offset<Buffer>>& buffers,
std::vector<flatbuffers::Offset<Tensor>>& tensors);
int batch_size_;
std::vector<std::vector<uint8_t>> custom_input_;
ErrorReporter* error_reporter_;
};
}
}
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> CallOpCustomOptions(
int primary_subgraph_index, int batch_size, FlatBufferBuilder& output) {
flexbuffers::Builder flexbuffer_builder;
flexbuffer_builder.Map([&] {
flexbuffer_builder.Int("subgraph_index", primary_subgraph_index);
flexbuffer_builder.Int("loop_count", batch_size);
});
flexbuffer_builder.Finish();
return output.CreateVector(flexbuffer_builder.GetBuffer());
}
}
void CustomValidationEmbedder::CreateTensorsFrom(
const SubGraph& from_subgraph, const std::vector<int>& from_indexes,
std::vector<std::vector<uint8_t>>* buffer_content,
flatbuffers::FlatBufferBuilder& fbb, std::vector<int>& new_indexes,
std::vector<flatbuffers::Offset<Buffer>>& buffers,
std::vector<flatbuffers::Offset<Tensor>>& tensors) {
int tensor_index_start = tensors.size();
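  // Clone each referenced tensor from the main subgraph, rewrite a leading
  // batch dimension of 1 to batch_size_, and point it at a freshly created
  // buffer (filled with the provided content when available).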
for (int i = 0; i < from_indexes.size(); i++) {
TensorT base_tensor;
from_subgraph.tensors()->Get(from_indexes[i])->UnPackTo(&base_tensor);
if (!base_tensor.shape.empty() && base_tensor.shape[0] == 1) {
base_tensor.shape[0] = batch_size_;
}
if (!base_tensor.shape_signature.empty() &&
base_tensor.shape_signature[0] == 1) {
base_tensor.shape_signature[0] = batch_size_;
}
base_tensor.buffer = buffers.size();
tensors.push_back(CreateTensor(fbb, &base_tensor));
new_indexes.push_back(tensor_index_start + i);
if (buffer_content && !(*buffer_content)[i].empty()) {
buffers.push_back(
CreateBuffer(fbb, fbb.CreateVector((*buffer_content)[i])));
} else {
buffers.push_back(CreateBuffer(fbb));
}
}
}
MinibenchmarkStatus CustomValidationEmbedder::BuildModel(
const Model& main_model, flatbuffers::FlatBufferBuilder& fbb) {
ModelT main_model_obj;
main_model.UnPackTo(&main_model_obj);
if (main_model_obj.subgraphs[0]->inputs.size() != custom_input_.size()) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Unexpected custom_input size. Expected: %d. Actual: %d.",
main_model_obj.subgraphs[0]->inputs.size(), custom_input_.size());
return kMinibenchmarkValidationSubgraphBuildFailed;
}
std::vector<flatbuffers::Offset<Metadata>> metadata;
metadata.reserve(main_model_obj.metadata.size());
for (auto& iter : main_model_obj.metadata) {
metadata.push_back(CreateMetadata(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SignatureDef>> signature_defs;
signature_defs.reserve(main_model_obj.signature_defs.size());
for (auto& iter : main_model_obj.signature_defs) {
signature_defs.push_back(CreateSignatureDef(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SubGraph>> subgraphs;
subgraphs.reserve(main_model_obj.subgraphs.size());
for (auto& iter : main_model_obj.subgraphs) {
subgraphs.push_back(CreateSubGraph(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<Buffer>> buffers;
buffers.reserve(main_model_obj.buffers.size());
for (auto& iter : main_model_obj.buffers) {
buffers.push_back(CreateBuffer(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes;
operator_codes.reserve(main_model_obj.operator_codes.size());
for (auto& iter : main_model_obj.operator_codes) {
operator_codes.push_back(CreateOperatorCode(fbb, iter.get()));
}
operator_codes.push_back(CreateOperatorCode(
fbb, BuiltinOperator_CUSTOM, fbb.CreateString("validation/call")));
int operator_code_index = operator_codes.size() - 1;
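  // Assemble the validation subgraph: batched copies of the primary
  // subgraph's input and output tensors wired to a single custom
  // "validation/call" operator whose options name subgraph 0 and batch_size_
  // as the loop count.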
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<int32_t> input;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->inputs, &custom_input_, fbb,
input, buffers, tensors);
std::vector<int32_t> output;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->outputs, nullptr, fbb, output,
buffers, tensors);
auto input_offset = fbb.CreateVector(input);
auto output_offset = fbb.CreateVector(output);
std::vector<flatbuffers::Offset<Operator>> operators{CreateOperator(
fbb, operator_code_index, input_offset, output_offset,
tflite::BuiltinOptions_NONE, 0,
CallOpCustomOptions( 0, batch_size_, fbb),
tflite::CustomOptionsFormat_FLEXBUFFERS)};
subgraphs.push_back(
CreateSubGraph(fbb, fbb.CreateVector(tensors), input_offset,
output_offset, fbb.CreateVector(operators),
fbb.CreateString(std::string(kValidationGraphName))));
fbb.Finish(
CreateModel(fbb, kModelSchemaVersion, fbb.CreateVector(operator_codes),
fbb.CreateVector(subgraphs),
fbb.CreateString(main_model_obj.description),
fbb.CreateVector(buffers),
0, fbb.CreateVector(metadata),
fbb.CreateVector(signature_defs)),
"TFL3");
if (Verify(fbb.GetBufferPointer(), fbb.GetSize(), error_reporter_)) {
return kMinibenchmarkSuccess;
} else {
return kMinibenchmarkValidationSubgraphBuildFailed;
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
constexpr int kMobileNetModelInputByteSize = 1 * 224 * 224 * 3;
class CustomValidationEmbedderTest : public ::testing::Test {
protected:
void SetUp() override {
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
ASSERT_TRUE(plain_model_loader_->Init());
}
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
};
TEST_F(CustomValidationEmbedderTest, BuildValidationModelSucceed) {
int batch_size = 5;
std::vector<uint8_t> input_buffer(batch_size * kMobileNetModelInputByteSize);
CustomValidationEmbedder embedder(batch_size, {input_buffer});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkSuccess);
auto model =
FlatBufferModel::BuildFromModel(GetModel(fbb.GetBufferPointer()));
auto interpreter = std::make_unique<Interpreter>();
auto resolver = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
resolver->AddCustom("validation/call", ops::Register_CALL(), 1);
ASSERT_EQ(InterpreterBuilder(*model, *resolver)(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
Subgraph* validation_graph = interpreter->subgraph(1);
EXPECT_THAT(input_buffer, testing::ElementsAreArray(
GetTensorData<uint8_t>(validation_graph->tensor(
validation_graph->inputs()[0])),
input_buffer.size()));
EXPECT_EQ(validation_graph->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(validation_graph->Invoke(), kTfLiteOk);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelTooManyInput) {
int batch_size = 5;
CustomValidationEmbedder embedder(batch_size, {{}, {}});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelInvalidBufferSize) {
CustomValidationEmbedder embedder(2, {std::vector<uint8_t>(2, 2)});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} |
743 | cpp | tensorflow/tensorflow | tensor | third_party/xla/xla/mlir/tools/mlir_interpreter/dialects/tensor.cc | tensorflow/cc/experimental/libtf/tests/tensor_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_BASE_PUBLIC_TENSOR_H_
#define TENSORFLOW_CC_EXPERIMENTAL_BASE_PUBLIC_TENSOR_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/cc/experimental/base/public/status.h"
namespace tensorflow {
namespace experimental {
namespace cc {
class Tensor {
public:
using DeleterCallback = std::function<void(void*, size_t)>;
static Tensor FromBuffer(TF_DataType dtype, const std::vector<int64_t>& shape,
void* data, size_t len, DeleterCallback deleter,
Status* status);
Tensor(Tensor&&) = default;
Tensor& operator=(Tensor&&) = default;
int dims() const;
int64_t dim_size(int d) const;
void* data() const;
TF_DataType dtype() const;
int64_t num_elements() const;
size_t num_bytes() const;
private:
friend class TensorHandle;
friend class Runtime;
explicit Tensor(TF_Tensor* tensor) : tensor_(tensor) {}
Tensor(const Tensor&) = delete;
Tensor& operator=(const Tensor&) = delete;
TF_Tensor* GetTFTensor() const { return tensor_.get(); }
struct DeleterStruct {
std::function<void(void*, size_t)> deleter;
};
static void DeleterFunction(void* memory, size_t len, void* deleter_struct) {
DeleterStruct* deleter = reinterpret_cast<DeleterStruct*>(deleter_struct);
deleter->deleter(memory, len);
delete deleter;
}
struct TFTensorDeleter {
void operator()(TF_Tensor* p) const { TF_DeleteTensor(p); }
};
std::unique_ptr<TF_Tensor, TFTensorDeleter> tensor_;
};
inline void* Tensor::data() const { return TF_TensorData(tensor_.get()); }
inline int Tensor::dims() const { return TF_NumDims(tensor_.get()); }
inline int64_t Tensor::dim_size(int d) const {
return TF_Dim(tensor_.get(), d);
}
inline TF_DataType Tensor::dtype() const {
return TF_TensorType(tensor_.get());
}
inline int64_t Tensor::num_elements() const {
return TF_TensorElementCount(tensor_.get());
}
inline size_t Tensor::num_bytes() const {
return TF_TensorByteSize(tensor_.get());
}
inline Tensor Tensor::FromBuffer(TF_DataType dtype,
const std::vector<int64_t>& shape, void* data,
size_t len, DeleterCallback deleter,
Status* status) {
DeleterStruct* deleter_struct = new DeleterStruct{deleter};
TF_Tensor* tensor = TF_NewTensor(dtype, shape.data(), shape.size(), data, len,
&DeleterFunction, deleter_struct);
if (tensor == nullptr) {
status->SetStatus(TF_INVALID_ARGUMENT,
"Failed to create tensor for input buffer");
return Tensor(nullptr);
}
return Tensor(tensor);
}
}
}
}
#endif
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/dialects/util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value_util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
namespace mlir {
namespace interpreter {
namespace {
int64_t Dim(InterpreterState& state, tensor::DimOp,
const InterpreterValue& tensor, int64_t dim) {
return DimImpl(tensor, dim, state);
}
InterpreterValue Empty(InterpreterState&, tensor::EmptyOp op,
ArrayRef<int64_t> dynamic_sizes) {
auto ty = op->getResultTypes().front().cast<mlir::ShapedType>();
auto shape = ReplaceDynamicVals(ty.getShape(), dynamic_sizes);
return InterpreterValue::MakeTensor(ty.getElementType(), shape);
}
InterpreterValue Extract(InterpreterState& state, tensor::ExtractOp,
InterpreterValue tensor, ArrayRef<int64_t> indices) {
if (!tensor.View().InBounds(indices)) {
state.AddFailure("array index out of bounds");
return {};
}
return tensor.ExtractElement(indices);
}
InterpreterValue FromElements(InterpreterState&, tensor::FromElementsOp op,
MutableArrayRef<InterpreterValue> elements) {
auto ty = op->getResultTypes().front().cast<mlir::ShapedType>();
auto result = InterpreterValue::MakeTensor(ty.getElementType(),
llvm::to_vector(ty.getShape()));
for (auto [index, element] : llvm::zip(result.View().Indices(), elements)) {
result.InsertElement(index, element);
}
return result;
}
llvm::SmallVector<InterpreterValue> CollapseShape(
InterpreterState&, tensor::CollapseShapeOp op,
const InterpreterValue& tensor) {
SmallVector<int64_t> sizes;
for (const auto& indices : op.getReassociationIndices()) {
int64_t size = 1;
for (auto dim : indices) {
size *= tensor.View().sizes[dim];
}
sizes.push_back(size);
}
return {ReshapeTensor(tensor, sizes)};
}
llvm::SmallVector<InterpreterValue> ExpandShape(
InterpreterState&, tensor::ExpandShapeOp op,
const InterpreterValue& tensor) {
auto ty = cast<ShapedType>(op->getResultTypes().front());
auto sizes = llvm::to_vector(ty.getShape());
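  // For each reassociation group, derive the size of at most one dynamic
  // result dimension by dividing the source dimension by the known static
  // result sizes in that group.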
for (const auto& [src_index, dst_indices] :
llvm::enumerate(op.getReassociationIndices())) {
int64_t size = tensor.View().sizes[src_index];
std::optional<int64_t> dyn_index = std::nullopt;
for (auto dim : dst_indices) {
if (sizes[dim] < 0) {
dyn_index = dim;
} else {
size /= sizes[dim];
}
}
if (dyn_index) {
sizes[*dyn_index] = size;
}
}
return {ReshapeTensor(tensor, sizes)};
}
llvm::SmallVector<InterpreterValue> ExtractSlice(
InterpreterState&, tensor::ExtractSliceOp extract, InterpreterValue tensor,
ArrayRef<int64_t> dynamic_offsets, ArrayRef<int64_t> dynamic_sizes,
ArrayRef<int64_t> dynamic_strides) {
auto v = ExtractOffsetsSizesStrides(dynamic_offsets, dynamic_sizes,
dynamic_strides, extract);
int64_t rank = v.offsets.size();
auto out = tensor.TypedAlike(v.sizes);
out.Fill([&](llvm::ArrayRef<int64_t> indices) {
llvm::SmallVector<int64_t> src_indices;
for (int64_t i = 0; i < rank; ++i) {
src_indices.push_back(indices[i] * v.strides[i] + v.offsets[i]);
}
return tensor.ExtractElement(src_indices);
});
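  // Rank reduction: drop dimensions that were sliced with a static size of 1
  // but are not kept as unit dimensions in the result type.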
int64_t num_dropped = 0;
auto& out_view = out.View();
int64_t dim = 0;
const auto& result_sizes = extract.getResultType().getShape();
const auto& static_sizes = extract.getStaticSizes();
while (dim < out_view.Rank()) {
if (static_sizes[num_dropped + dim] == 1 &&
(dim >= result_sizes.size() || result_sizes[dim] != 1)) {
out_view.sizes.erase(out_view.sizes.begin() + dim);
out_view.strides.erase(out_view.strides.begin() + dim);
++num_dropped;
} else {
++dim;
}
}
return {out};
}
template <typename Op>
llvm::SmallVector<InterpreterValue> InsertSlice(
InterpreterState&, Op insert, InterpreterValue src, InterpreterValue dest,
ArrayRef<int64_t> dynamic_offsets, ArrayRef<int64_t> dynamic_sizes,
ArrayRef<int64_t> dynamic_strides) {
if (insert->getNumResults() == 1) {
dest = dest.Clone();
}
auto v = ExtractOffsetsSizesStrides(dynamic_offsets, dynamic_sizes,
dynamic_strides, insert);
auto static_sizes = insert.getStaticSizes();
llvm::SmallVector<int64_t> inserted_dims;
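  // Identify destination dimensions that do not correspond to any source
  // dimension; these must be static unit dims inserted by a rank-expanding
  // insert_slice.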
auto& src_view = src.View();
auto* src_size_it = src_view.sizes.begin();
for (auto [dim, size] : llvm::enumerate(static_sizes)) {
if (src_size_it == src_view.sizes.end() ||
(*src_size_it != size && size >= 0)) {
assert(size == 1 && "Can only insert unit dims");
inserted_dims.push_back(dim);
} else {
++src_size_it;
}
}
for (const auto& src_indices : src_view.Indices()) {
llvm::SmallVector<int64_t> src_with_inserted_dims = src_indices;
for (int64_t dim : inserted_dims) {
src_with_inserted_dims.insert(src_with_inserted_dims.begin() + dim, 0);
}
llvm::SmallVector<int64_t> dst_indices;
for (auto [src_index, stride, offset] :
llvm::zip(src_with_inserted_dims, v.strides, v.offsets)) {
dst_indices.push_back(src_index * stride + offset);
}
dest.InsertElement(dst_indices, src.ExtractElement(src_indices));
}
if (insert->getNumResults() == 1) {
return {dest};
}
return {};
}
InterpreterValue Generate(InterpreterState& state, tensor::GenerateOp generate,
ArrayRef<int64_t> dynamic_sizes) {
auto ty = generate->getResultTypes().front().cast<ShapedType>();
auto sizes = ReplaceDynamicVals(ty.getShape(), dynamic_sizes);
auto result = InterpreterValue::MakeTensor(ty.getElementType(), sizes);
result.Fill([&](ArrayRef<int64_t> indices) -> InterpreterValue {
auto values = Interpret(state, generate.getRegion(),
PackInterpreterValues<int64_t>(indices));
if (state.HasFailure()) {
return {result.ExtractElement(indices)};
}
return values.front();
});
return {result};
}
InterpreterValue Insert(InterpreterState& state, tensor::InsertOp,
const InterpreterValue& value,
const InterpreterValue& tensor,
ArrayRef<int64_t> indices) {
auto result = tensor.Clone();
if (result.View().InBounds(indices)) {
result.InsertElement(indices, value);
} else {
state.AddFailure("array index out of bounds");
}
return result;
}
InterpreterValue Pad(InterpreterState& state, tensor::PadOp pad,
InterpreterValue tensor, ArrayRef<int64_t> dynamic_lows,
ArrayRef<int64_t> dynamic_highs) {
auto lows = ReplaceDynamicVals(pad.getStaticLow(), dynamic_lows);
auto highs = ReplaceDynamicVals(pad.getStaticHigh(), dynamic_highs);
auto& view = tensor.View();
llvm::SmallVector<int64_t> result_sizes;
for (auto [size, low, high] : llvm::zip(view.sizes, lows, highs)) {
result_sizes.push_back(size + low + high);
}
auto result = tensor.TypedAlike(result_sizes);
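  // Indices that fall inside the original tensor copy its value; out-of-range
  // indices are filled by evaluating the pad op's body region.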
result.Fill([&](llvm::ArrayRef<int64_t> out_index) -> InterpreterValue {
llvm::SmallVector<int64_t> in_index;
for (auto [index, low] : llvm::zip(out_index, lows)) {
in_index.push_back(index - low);
}
if (view.InBounds(in_index)) {
return tensor.ExtractElement(in_index);
}
return Interpret(state, pad.getRegion(), PackInterpreterValues(out_index))
.front();
});
return result;
}
REGISTER_MLIR_INTERPRETER_OP("tensor.cast",
"builtin.unrealized_conversion_cast");
REGISTER_MLIR_INTERPRETER_OP("tensor.yield", NoOpTerminator);
REGISTER_MLIR_INTERPRETER_OP(CollapseShape);
REGISTER_MLIR_INTERPRETER_OP(Dim);
REGISTER_MLIR_INTERPRETER_OP(Empty);
REGISTER_MLIR_INTERPRETER_OP(ExpandShape);
REGISTER_MLIR_INTERPRETER_OP(Extract);
REGISTER_MLIR_INTERPRETER_OP(ExtractSlice);
REGISTER_MLIR_INTERPRETER_OP(FromElements);
REGISTER_MLIR_INTERPRETER_OP(Generate);
REGISTER_MLIR_INTERPRETER_OP(Insert);
REGISTER_MLIR_INTERPRETER_OP(InsertSlice<tensor::InsertSliceOp>);
REGISTER_MLIR_INTERPRETER_OP(InsertSlice<tensor::ParallelInsertSliceOp>);
REGISTER_MLIR_INTERPRETER_OP(Pad);
}
}
} | #include <cstdint>
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
using AbstractContextPtr = tensorflow::AbstractContextPtr;
using AbstractContext = tensorflow::AbstractContext;
using AbstractTensorHandle = tensorflow::AbstractTensorHandle;
using TF_StatusPtr = tensorflow::TF_StatusPtr;
using Status = tensorflow::Status;
class UnifiedCAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
}
};
namespace {
template <class T>
TaggedValue MakeContext(T runtime) {
AbstractContext* ctx_raw = nullptr;
Status s = BuildImmediateExecutionContext(runtime, &ctx_raw);
return TaggedValue::Capsule(static_cast<void*>(ctx_raw), [](void* p) {
tensorflow::internal::AbstractContextDeleter()(
static_cast<AbstractContext*>(p));
});
}
}
TEST_P(UnifiedCAPI, HoldTensors) {
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
impl::TaggedValueTensor x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.message();
x.reset(x_raw, false);
}
impl::TaggedValueTensor x2(x);
{
impl::TaggedValue tensor(std::move(x2));
auto list = TaggedValue::List();
list.list().emplace_back(3.f);
list.list().push_back(tensor);
list.list().emplace_back(std::move(tensor));
ASSERT_FALSE(x->RefCountIsOne());
}
ASSERT_TRUE(x->RefCountIsOne());
}
TaggedValue MakeScalarTensor(TaggedValue self, TaggedValue val) {
if (val.type() != TaggedValue::FLOAT32) return TaggedValue::None();
if (self.type() != TaggedValue::DICT) return TaggedValue::None();
TaggedValue ctx_capsule = (self.dict())[TaggedValue("context")];
AbstractContext* ctx = static_cast<AbstractContext*>(ctx_capsule.capsule());
AbstractTensorHandle* x_raw = nullptr;
Status s =
TestScalarTensorHandle<float, TF_FLOAT>(ctx, val.f32().get(), &x_raw);
if (!s.ok()) return TaggedValue::None();
return TaggedValue(impl::TaggedValueTensor(x_raw, false));
}
TEST_P(UnifiedCAPI, SimpleCreationFunctions) {
TaggedValue context = MakeContext(std::get<1>(GetParam()));
Object methods;
methods.Set(String("context"), Handle(MakeContext(std::get<1>(GetParam()))));
methods.Set(String("make_scalar_tensor"),
Callable(TaggedValue(MakeScalarTensor)));
Handle foo = *methods.Get<Callable>(String("make_scalar_tensor"))
->Call<Handle>(methods, Float(3.f));
}
INSTANTIATE_TEST_SUITE_P(Tracing, UnifiedCAPI,
::testing::Combine(::testing::Values("graphdef",
"mlir"),
::testing::Values(false)));
}
} |
744 | cpp | tensorflow/tensorflow | shape | third_party/xla/xla/python/ifrt/shape.cc | third_party/xla/xla/python/ifrt/shape_test.cc | #ifndef XLA_PYTHON_IFRT_SHAPE_H_
#define XLA_PYTHON_IFRT_SHAPE_H_
#include <stdbool.h>
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/shape.pb.h"
namespace xla {
namespace ifrt {
class Shape {
public:
static constexpr int kInlineDimensionSize = 6;
using Dimensions = absl::InlinedVector<int64_t, kInlineDimensionSize>;
explicit Shape(absl::Span<const int64_t> dims)
: dims_(Dimensions(dims.begin(), dims.end())) {}
Shape(const Shape&) = default;
Shape(Shape&&) = default;
Shape& operator=(const Shape&) = default;
Shape& operator=(Shape&&) = default;
static absl::StatusOr<Shape> FromProto(const ShapeProto& proto);
ShapeProto ToProto() const;
absl::Span<const int64_t> dims() const { return dims_; }
bool operator==(const Shape& other) const { return dims_ == other.dims_; }
bool operator!=(const Shape& other) const { return dims_ != other.dims_; }
int64_t num_elements() const;
std::string DebugString() const;
private:
Dimensions dims_;
};
class BoundedDynamicShapeTag {
public:
static constexpr int kInlineDimensionSize = 6;
using DynamicDimensions = absl::InlinedVector<bool, kInlineDimensionSize>;
explicit BoundedDynamicShapeTag(absl::Span<const bool> dynamic_dims)
: dynamic_dims_(
DynamicDimensions(dynamic_dims.begin(), dynamic_dims.end())) {
CHECK(absl::c_any_of(dynamic_dims_, [](bool b) { return b; }))
<< "At least one dimension needs to be dynamically sized.";
}
BoundedDynamicShapeTag(const BoundedDynamicShapeTag&) = default;
BoundedDynamicShapeTag(BoundedDynamicShapeTag&&) = default;
BoundedDynamicShapeTag& operator=(const BoundedDynamicShapeTag&) = default;
BoundedDynamicShapeTag& operator=(BoundedDynamicShapeTag&&) = default;
absl::Span<const bool> DynamicDims() const { return dynamic_dims_; }
bool operator==(const BoundedDynamicShapeTag& other) const {
return dynamic_dims_ == other.dynamic_dims_;
}
bool operator!=(const BoundedDynamicShapeTag& other) const {
return !(*this == other);
}
static absl::StatusOr<BoundedDynamicShapeTag> FromProto(
const BoundedDynamicShapeTagProto& proto);
BoundedDynamicShapeTagProto ToProto() const;
private:
DynamicDimensions dynamic_dims_;
};
using DynamicShapeTag = std::variant<BoundedDynamicShapeTag>;
class DynamicShape {
public:
static absl::StatusOr<DynamicShape> Create(Shape shape, DynamicShapeTag tag);
DynamicShape(const DynamicShape&) = default;
DynamicShape(DynamicShape&&) = default;
DynamicShape& operator=(const DynamicShape&) = default;
DynamicShape& operator=(DynamicShape&&) = default;
const DynamicShapeTag& GetTag() const { return tag_; }
bool operator==(const DynamicShape& other) const {
return tag_ == other.tag_ && shape_ == other.shape_;
}
bool operator!=(const DynamicShape& other) const { return !(*this == other); }
absl::StatusOr<Shape> GetPaddedShape() const;
bool IsDynamicDim(int dimension) const;
static absl::StatusOr<DynamicShape> FromProto(const DynamicShapeProto& proto);
DynamicShapeProto ToProto() const;
std::string DebugString() const;
private:
DynamicShape(Shape shape, DynamicShapeTag tag)
: shape_(std::move(shape)), tag_(std::move(tag)) {}
Shape shape_;
DynamicShapeTag tag_;
};
std::ostream& operator<<(std::ostream& os, const Shape& shape);
std::ostream& operator<<(std::ostream& os, const DynamicShape& dynamic_shape);
}
}
#endif
#include "xla/python/ifrt/shape.h"
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/shape.pb.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
template <class... Ts>
struct overloaded : Ts... {
using Ts::operator()...;
};
template <class... Ts>
overloaded(Ts...) -> overloaded<Ts...>;
}
absl::StatusOr<Shape> Shape::FromProto(const ShapeProto& proto) {
Shape::Dimensions dims;
dims.reserve(proto.dims_size());
for (int64_t dim : proto.dims()) {
if (dim < 0) {
return InvalidArgument(
"Shape expects non-negative dimension sizes, but got %d", dim);
}
dims.push_back(dim);
}
return Shape(std::move(dims));
}
ShapeProto Shape::ToProto() const {
ShapeProto proto;
proto.mutable_dims()->Reserve(dims().size());
for (int64_t dim : dims()) {
proto.mutable_dims()->AddAlreadyReserved(dim);
}
return proto;
}
int64_t Shape::num_elements() const {
int64_t count = 1;
for (int64_t d : dims_) {
count *= d;
}
return count;
}
std::string Shape::DebugString() const {
return absl::StrCat("[", absl::StrJoin(dims_, ","), "]");
}
absl::StatusOr<BoundedDynamicShapeTag> BoundedDynamicShapeTag::FromProto(
const BoundedDynamicShapeTagProto& proto) {
BoundedDynamicShapeTag::DynamicDimensions dynamic_dims;
dynamic_dims.reserve(proto.is_dynamic_dims_size());
for (bool dynamic_dim : proto.is_dynamic_dims()) {
dynamic_dims.push_back(dynamic_dim);
}
return BoundedDynamicShapeTag(std::move(dynamic_dims));
}
BoundedDynamicShapeTagProto BoundedDynamicShapeTag::ToProto() const {
BoundedDynamicShapeTagProto proto;
proto.mutable_is_dynamic_dims()->Reserve(dynamic_dims_.size());
for (bool dynamic_dim : dynamic_dims_) {
proto.mutable_is_dynamic_dims()->AddAlreadyReserved(dynamic_dim);
}
return proto;
}
absl::StatusOr<DynamicShape> DynamicShape::Create(Shape shape,
DynamicShapeTag tag) {
TF_RETURN_IF_ERROR(std::visit(
overloaded{
[&](const BoundedDynamicShapeTag& tag) -> absl::Status {
if (tag.DynamicDims().size() != shape.dims().size()) {
return InvalidArgument(
"Shape and tag must have the same number of dimensions.");
}
return absl::OkStatus();
},
},
tag));
return DynamicShape(std::move(shape), std::move(tag));
}
absl::StatusOr<Shape> DynamicShape::GetPaddedShape() const {
return std::visit(
overloaded{
[this](BoundedDynamicShapeTag tag) { return shape_; },
},
tag_);
}
bool DynamicShape::IsDynamicDim(int dimension) const {
return std::visit(
overloaded{
[dimension](BoundedDynamicShapeTag tag) {
return tag.DynamicDims().at(dimension);
},
},
tag_);
}
absl::StatusOr<DynamicShape> DynamicShape::FromProto(
const DynamicShapeProto& proto) {
TF_ASSIGN_OR_RETURN(Shape shape, Shape::FromProto(proto.shape()));
if (proto.has_bounded_dynamic_shape_tag()) {
TF_ASSIGN_OR_RETURN(
BoundedDynamicShapeTag tag,
BoundedDynamicShapeTag::FromProto(proto.bounded_dynamic_shape_tag()));
return DynamicShape::Create(std::move(shape), std::move(tag));
}
return InvalidArgument("Only support bounded dynamic shape.");
}
DynamicShapeProto DynamicShape::ToProto() const {
DynamicShapeProto proto;
*proto.mutable_shape() = shape_.ToProto();
std::visit(
overloaded{
[&proto](BoundedDynamicShapeTag tag) {
*proto.mutable_bounded_dynamic_shape_tag() = tag.ToProto();
},
},
tag_);
return proto;
}
std::string DynamicShape::DebugString() const {
return std::visit(
overloaded{[this](BoundedDynamicShapeTag tag) {
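        // Bounded dynamic dimensions are rendered with a "<=" prefix,
        // e.g. "[<=2,4]" for a shape whose first dimension is dynamic.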
absl::InlinedVector<std::string, Shape::kInlineDimensionSize> dim_reps;
dim_reps.reserve(shape_.dims().size());
for (int i = 0; i < shape_.dims().size(); ++i) {
absl::string_view prefix = tag.DynamicDims()[i] ? "<=" : "";
dim_reps.push_back(absl::StrCat(prefix, shape_.dims()[i]));
}
return absl::StrCat("[", absl::StrJoin(dim_reps, ","), "]");
}},
tag_);
}
std::ostream& operator<<(std::ostream& os, const Shape& shape) {
return os << shape.DebugString();
}
std::ostream& operator<<(std::ostream& os, const DynamicShape& dynamic_shape) {
return os << dynamic_shape.DebugString();
}
}
} | #include "xla/python/ifrt/shape.h"
#include <cstdint>
#include <limits>
#include <numeric>
#include <sstream>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/python/ifrt/shape.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(ShapeTest, LargeDim) {
Shape shape({std::numeric_limits<int64_t>::max()});
EXPECT_THAT(shape.dims(), ElementsAre(std::numeric_limits<int64_t>::max()));
}
TEST(ShapeTest, ManyDims) {
const int kNumDims = 65536;
std::vector<int64_t> dims(kNumDims);
std::iota(dims.begin(), dims.end(), 0);
Shape shape(dims);
EXPECT_THAT(shape.dims(), ElementsAreArray(dims));
}
TEST(ShapeTest, ScalarNumElements) {
Shape shape({});
EXPECT_EQ(shape.num_elements(), 1);
}
TEST(ShapeTest, ZeroDimNumElements) {
{
Shape shape({0});
EXPECT_EQ(shape.num_elements(), 0);
}
{
Shape shape({1, 0});
EXPECT_EQ(shape.num_elements(), 0);
}
{
Shape shape({0, 1});
EXPECT_EQ(shape.num_elements(), 0);
}
{
Shape shape({0, 0});
EXPECT_EQ(shape.num_elements(), 0);
}
}
TEST(ShapeTest, NonZeroDimsNumElements) {
{
Shape shape({2});
EXPECT_EQ(shape.num_elements(), 2);
}
{
Shape shape({2, 3});
EXPECT_EQ(shape.num_elements(), 6);
}
}
TEST(ShapeTest, ToFromProto) {
{
Shape shape({});
ShapeProto proto = shape.ToProto();
TF_ASSERT_OK_AND_ASSIGN(Shape shape_copy, shape.FromProto(proto));
EXPECT_EQ(shape_copy, shape);
}
{
Shape shape({1, 2});
ShapeProto proto = shape.ToProto();
TF_ASSERT_OK_AND_ASSIGN(Shape shape_copy, shape.FromProto(proto));
EXPECT_EQ(shape_copy, shape);
}
}
TEST(BoundedDynamicShapeTagDeathTest, NoDynamicDim) {
EXPECT_DEATH(BoundedDynamicShapeTag tag({false, false}),
"At least one dimension needs to be dynamically sized");
}
TEST(BoundedDynamicShapeTagTest, ToFromProto) {
BoundedDynamicShapeTag tag({true, false});
BoundedDynamicShapeTagProto proto = tag.ToProto();
TF_ASSERT_OK_AND_ASSIGN(BoundedDynamicShapeTag tag_copy,
tag.FromProto(proto));
EXPECT_EQ(tag_copy, tag);
}
TEST(DynamicShapeTest, SizeMismatch) {
Shape shape({1, 2, 3});
BoundedDynamicShapeTag tag({true, true});
EXPECT_THAT(DynamicShape::Create(shape, tag),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("must have the same number of dimensions")));
}
TEST(DynamicShapeTest, Equality) {
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape1,
DynamicShape::Create(Shape({2, 4}),
BoundedDynamicShapeTag({true, false})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape2,
DynamicShape::Create(Shape({3, 4}),
BoundedDynamicShapeTag({true, false})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape3,
DynamicShape::Create(Shape({2, 4}),
BoundedDynamicShapeTag({true, true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape4,
DynamicShape::Create(Shape({2, 4, 3}),
BoundedDynamicShapeTag({true, false, true})));
EXPECT_EQ(shape1, shape1);
EXPECT_NE(shape1, shape2);
EXPECT_NE(shape1, shape3);
EXPECT_NE(shape1, shape4);
}
TEST(DynamicShapeTest, IsDynamicDim) {
Shape shape({1, 2, 3});
BoundedDynamicShapeTag tag({true, false, true});
TF_ASSERT_OK_AND_ASSIGN(DynamicShape dynamic_shape,
DynamicShape::Create(shape, tag));
EXPECT_TRUE(dynamic_shape.IsDynamicDim(0));
EXPECT_FALSE(dynamic_shape.IsDynamicDim(1));
EXPECT_TRUE(dynamic_shape.IsDynamicDim(2));
}
TEST(DynamicShapeTest, GetPaddedShape) {
Shape shape({1, 2, 3});
BoundedDynamicShapeTag tag({true, true, true});
TF_ASSERT_OK_AND_ASSIGN(DynamicShape dynamic_shape,
DynamicShape::Create(shape, tag));
TF_ASSERT_OK_AND_ASSIGN(Shape padded_shape, dynamic_shape.GetPaddedShape());
EXPECT_EQ(padded_shape, shape);
}
TEST(DynamicShapeTest, ToFromProto) {
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape,
DynamicShape::Create(Shape({2, 4}),
BoundedDynamicShapeTag({true, false})));
DynamicShapeProto proto = shape.ToProto();
TF_ASSERT_OK_AND_ASSIGN(DynamicShape shape_copy, shape.FromProto(proto));
EXPECT_EQ(shape_copy, shape);
}
TEST(DynamicShapeTest, ToString) {
{
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape,
DynamicShape::Create(Shape({2, 4}),
BoundedDynamicShapeTag({true, true})));
std::ostringstream output;
output << shape;
EXPECT_EQ(output.str(), "[<=2,<=4]");
}
{
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shape,
DynamicShape::Create(Shape({2, 4}),
BoundedDynamicShapeTag({false, true})));
std::ostringstream output;
output << shape;
EXPECT_EQ(output.str(), "[2,<=4]");
}
}
}
}
} |
745 | cpp | tensorflow/tensorflow | quantized_tensor_element_type | tensorflow/lite/experimental/shlo/quantized_tensor_element_type.cc | tensorflow/lite/experimental/shlo/quantized_tensor_element_type_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_QUANTIZED_TENSOR_ELEMENT_TYPE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_QUANTIZED_TENSOR_ELEMENT_TYPE_H_
#include <cstdint>
#include <initializer_list>
#include <optional>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
namespace shlo_ref {
constexpr bool IsValidQuantizationTypePair(DataType storage_type,
DataType expressed_type) {
switch (storage_type) {
case DataType::kSI4:
case DataType::kSI8:
case DataType::kSI16:
break;
default:
return false;
}
switch (expressed_type) {
case DataType::kBF16:
case DataType::kF16:
case DataType::kF32:
break;
default:
return false;
}
return SizeOf(storage_type) < SizeOf(expressed_type);
}
class QuantizedElementTypePerTensor {
public:
using ZeroPointVariant =
std::variant<Storage<DataType::kSI4>::Type, Storage<DataType::kSI8>::Type,
Storage<DataType::kSI16>::Type>;
using ScaleVariant = std::variant<Storage<DataType::kBF16>::Type,
Storage<DataType::kF16>::Type,
Storage<DataType::kF32>::Type>;
template <class T, class U>
QuantizedElementTypePerTensor(DataType storage_type, T zero_point,
DataType expressed_type, U scale) {
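    // The switches below convert the caller-provided zero point and scale to
    // the concrete C++ types that correspond to storage_type and
    // expressed_type, then store them in the respective variants.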
#define SHLO_STORAGE_CASE(TYPE) \
case DataType ::k##TYPE: \
zero_point_ = \
static_cast<typename Storage<DataType::k##TYPE>::Type>(zero_point); \
break;
switch (storage_type) {
SHLO_STORAGE_CASE(SI4);
SHLO_STORAGE_CASE(SI8);
SHLO_STORAGE_CASE(SI16);
default:
ABSL_LOG(FATAL) << "Unsupported quantization storage type ("
<< ToString(storage_type) << ").";
}
#undef SHLO_STORAGE_CASE
#define SHLO_EXPRESSED_CASE(TYPE) \
case DataType ::k##TYPE: \
scale_ = static_cast<typename Storage<DataType::k##TYPE>::Type>(scale); \
break;
switch (expressed_type) {
SHLO_EXPRESSED_CASE(BF16);
SHLO_EXPRESSED_CASE(F16);
SHLO_EXPRESSED_CASE(F32);
default:
ABSL_LOG(FATAL) << "Unsupported quantization expressed type ("
<< ToString(expressed_type) << ").";
}
#undef SHLO_EXPRESSED_CASE
ABSL_CHECK(IsValidQuantizationTypePair(StorageType(), ExpressedType()));
}
DataType ExpressedType() const {
const DataType scale_types[] = {DataType::kBF16, DataType::kF16,
DataType::kF32};
return scale_types[scale_.index()];
}
DataType StorageType() const {
const DataType zero_point_types[] = {DataType::kSI4, DataType::kSI8,
DataType::kSI16, DataType::kSI32};
return zero_point_types[zero_point_.index()];
}
ScaleVariant& Scale() { return scale_; }
const ScaleVariant& Scale() const { return scale_; }
template <DataType expressed_type>
const typename Storage<expressed_type>::Type& ScaleAs() const {
return std::get<typename Storage<expressed_type>::Type>(scale_);
}
ZeroPointVariant& ZeroPoint() { return zero_point_; }
const ZeroPointVariant& ZeroPoint() const { return zero_point_; }
template <DataType storage_type>
const typename Storage<storage_type>::Type& ZeroPointAs() const {
return std::get<typename Storage<storage_type>::Type>(zero_point_);
}
friend bool operator==(const QuantizedElementTypePerTensor& lhs,
const QuantizedElementTypePerTensor& rhs) {
return lhs.zero_point_ == rhs.zero_point_ && lhs.scale_ == rhs.scale_;
}
friend bool operator!=(const QuantizedElementTypePerTensor& lhs,
const QuantizedElementTypePerTensor& rhs) {
return !(lhs == rhs);
}
private:
ZeroPointVariant zero_point_;
ScaleVariant scale_;
};
class QuantizedElementTypePerAxis {
template <class To, class FromRange, class... Ts>
void ConvertAndAssign(std::variant<Ts...>& dest, FromRange&& range) {
using std::begin;
using std::end;
dest = To(begin(range), end(range));
}
public:
template <typename T>
using SmallInlinedVector = absl::InlinedVector<T, 8>;
using ScalesVariant =
std::variant<SmallInlinedVector<Storage<DataType::kBF16>::Type>,
SmallInlinedVector<Storage<DataType::kF16>::Type>,
SmallInlinedVector<Storage<DataType::kF32>::Type>>;
using ZeroPointsVariant =
std::variant<SmallInlinedVector<Storage<DataType::kSI4>::Type>,
SmallInlinedVector<Storage<DataType::kSI8>::Type>,
SmallInlinedVector<Storage<DataType::kSI16>::Type>>;
template <class RangeT = std::initializer_list<int32_t>,
class RangeU = std::initializer_list<float>>
QuantizedElementTypePerAxis(DataType storage_type, RangeT&& zero_points,
DataType expressed_type, RangeU&& scales,
Axis quantized_dimension)
: quantized_dimension_(quantized_dimension) {
#define SHLO_STORAGE_CASE(TYPE) \
case DataType ::k##TYPE: \
ConvertAndAssign<SmallInlinedVector<Storage<DataType::k##TYPE>::Type>>( \
zero_points_, static_cast<RangeT&&>(zero_points)); \
break;
switch (storage_type) {
SHLO_STORAGE_CASE(SI4);
SHLO_STORAGE_CASE(SI8);
SHLO_STORAGE_CASE(SI16);
default:
ABSL_LOG(FATAL) << "Unsupported quantization storage type ("
<< ToString(storage_type) << ").";
}
#undef SHLO_STORAGE_CASE
#define SHLO_EXPRESSED_CASE(TYPE) \
case DataType ::k##TYPE: \
ConvertAndAssign<SmallInlinedVector<Storage<DataType::k##TYPE>::Type>>( \
scales_, static_cast<RangeU&&>(scales)); \
break;
switch (expressed_type) {
SHLO_EXPRESSED_CASE(BF16);
SHLO_EXPRESSED_CASE(F16);
SHLO_EXPRESSED_CASE(F32);
default:
ABSL_LOG(FATAL) << "Unsupported quantization expressed type ("
<< ToString(expressed_type) << ").";
}
#undef SHLO_EXPRESSED_CASE
ABSL_CHECK(IsValidQuantizationTypePair(StorageType(), ExpressedType()));
}
DataType ExpressedType() const {
const DataType scale_types[] = {DataType::kBF16, DataType::kF16,
DataType::kF32};
return scale_types[scales_.index()];
}
DataType StorageType() const {
const DataType zero_point_types[] = {DataType::kSI4, DataType::kSI8,
DataType::kSI16, DataType::kSI32};
return zero_point_types[zero_points_.index()];
}
Axis& QuantizedDimension() { return quantized_dimension_; }
const Axis& QuantizedDimension() const { return quantized_dimension_; }
ScalesVariant& Scales() { return scales_; }
const ScalesVariant& Scales() const { return scales_; }
template <DataType expressed_type>
const SmallInlinedVector<typename Storage<expressed_type>::Type>& ScalesAs()
const {
return std::get<SmallInlinedVector<typename Storage<expressed_type>::Type>>(
scales_);
}
ZeroPointsVariant& ZeroPoints() { return zero_points_; }
const ZeroPointsVariant& ZeroPoints() const { return zero_points_; }
template <DataType storage_type>
const SmallInlinedVector<typename Storage<storage_type>::Type>& ZeroPointsAs()
const {
return std::get<SmallInlinedVector<typename Storage<storage_type>::Type>>(
zero_points_);
}
friend bool operator==(const QuantizedElementTypePerAxis& lhs,
const QuantizedElementTypePerAxis& rhs) {
return lhs.zero_points_ == rhs.zero_points_ && lhs.scales_ == rhs.scales_;
}
friend bool operator!=(const QuantizedElementTypePerAxis& lhs,
const QuantizedElementTypePerAxis& rhs) {
return !(lhs == rhs);
}
private:
Axis quantized_dimension_;
ScalesVariant scales_;
ZeroPointsVariant zero_points_;
};
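// Illustrative sketch (not part of the original header): a per-axis quantized
// element type carries one zero point and one scale per slice of the
// quantized dimension, e.g.
//   QuantizedElementTypePerAxis t(DataType::kSI8, /*zero_points=*/{0, 0, 0},
//                                 DataType::kF32,
//                                 /*scales=*/{0.5f, 0.25f, 1.0f},
//                                 /*quantized_dimension=*/1);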
std::string ToString(const QuantizedElementTypePerTensor& t);
std::string ToString(const QuantizedElementTypePerAxis& t);
QuantizedElementTypePerTensor BaselineType(
const QuantizedElementTypePerTensor& type);
QuantizedElementTypePerAxis BaselineType(
const QuantizedElementTypePerAxis& type);
}
#endif
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <sstream>
#include <string>
#include <type_traits>
#include <variant>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
std::string ToString(const QuantizedElementTypePerTensor& t) {
std::stringstream sstr;
sstr << "QuantizedPerTensor[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << "]";
return sstr.str();
}
std::string ToString(const QuantizedElementTypePerAxis& t) {
std::stringstream sstr;
sstr << "QuantizedPerAxis[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << ", " << t.QuantizedDimension() << "]";
return sstr.str();
}
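// BaselineType returns a copy of the element type whose scale(s) are reset to
// 1 and whose zero point(s) are reset to 0, leaving the storage type, the
// expressed type and (for per-axis) the quantized dimension unchanged.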
QuantizedElementTypePerTensor BaselineType(
const QuantizedElementTypePerTensor& type) {
QuantizedElementTypePerTensor baseline = type;
std::visit(
[](auto& scale) -> void {
scale = std::remove_reference_t<decltype(scale)>(1);
},
baseline.Scale());
std::visit(
[](auto& zero_point) -> void {
zero_point = std::remove_reference_t<decltype(zero_point)>(0);
},
baseline.ZeroPoint());
return baseline;
}
QuantizedElementTypePerAxis BaselineType(
const QuantizedElementTypePerAxis& type) {
QuantizedElementTypePerAxis baseline = type;
std::visit(
[](auto& scales) -> void {
using T = std::remove_reference_t<decltype(scales[0])>;
absl::c_fill(scales, static_cast<T>(1));
},
baseline.Scales());
std::visit(
[](auto& zero_points) -> void {
using T = std::remove_reference_t<decltype(zero_points[0])>;
absl::c_fill(zero_points, static_cast<T>(0));
},
baseline.ZeroPoints());
return baseline;
}
} | #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <array>
#include <cstdint>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
namespace {
using testing::Each;
using testing::ElementsAre;
using testing::ElementsAreArray;
using testing::Eq;
using testing::FloatEq;
using testing::Pointwise;
TEST(Quantization, IsValidQuantizationTypePairWorks) {
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF32));
}
struct QuantizationPair {
DataType storage_type;
DataType expressed_type;
};
std::vector<QuantizationPair> ValidQuantizationTypePairs() {
return {QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI16,
.expressed_type = DataType::kF32}};
}
struct PerTensorTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return static_cast<int32_t>(v);
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return static_cast<float>(v);
};
};
TEST_P(PerTensorTest, BuildPerTensorWorks) {
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor type(config.storage_type, 1,
config.expressed_type, 2.5);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_EQ(std::visit(ExtractValueAsInt, type.ZeroPoint()), 1);
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scale()), FloatEq(2.5));
}
TEST_P(PerTensorTest, BaselineTypeWorks) {
float scale = 0.5f;
int32_t zero_point = 3;
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor element(config.storage_type, zero_point,
config.expressed_type, scale);
const auto baseline = BaselineType(element);
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(std::visit(ExtractValueAsInt, baseline.ZeroPoint()), 0);
EXPECT_THAT(std::visit(ExtractValueAsFloat, baseline.Scale()), FloatEq(1));
}
INSTANTIATE_TEST_SUITE_P(PerTensor, PerTensorTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
struct PerAxisTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return std::vector<int32_t>(v.begin(), v.end());
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return std::vector<float>(v.begin(), v.end());
};
};
TEST_P(PerAxisTest, BuildPerAxisWorks) {
const QuantizationPair& config = GetParam();
const std::vector<int32_t> ref_zero_points{1, 2, 3};
const std::vector<float> ref_scales{1.5, 2.5, 3.5};
QuantizedElementTypePerAxis type(config.storage_type, ref_zero_points,
config.expressed_type, ref_scales,
1);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_THAT(std::visit(ExtractValueAsInt, type.ZeroPoints()),
ElementsAreArray(ref_zero_points));
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scales()),
Pointwise(FloatEq(), ref_scales));
}
TEST_P(PerAxisTest, BaselineTypeWorks) {
const QuantizationPair& config = GetParam();
float scales[3] = {0.5f, 0.6f, 0.2f};
int32_t zero_points[3] = {3, 1, 2};
const QuantizedElementTypePerAxis element(config.storage_type, scales,
config.expressed_type, zero_points,
3u);
const auto baseline = BaselineType(element);
const auto extracted_zero_points =
std::visit(ExtractValueAsInt, baseline.ZeroPoints());
const auto extracted_scales =
std::visit(ExtractValueAsFloat, baseline.Scales());
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(baseline.QuantizedDimension(), element.QuantizedDimension());
EXPECT_THAT(extracted_zero_points, Each(0));
EXPECT_THAT(extracted_zero_points.size(), std::size(zero_points));
EXPECT_THAT(extracted_scales, Each(FloatEq(1.0f)));
EXPECT_THAT(extracted_scales.size(), std::size(scales));
}
INSTANTIATE_TEST_SUITE_P(PerAxis, PerAxisTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
}
} |
746 | cpp | tensorflow/tensorflow | debug | tensorflow/lite/experimental/shlo/legacy/src/debug.cc | tensorflow/compiler/mlir/lite/debug/debug_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_DEBUG_DEBUG_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_DEBUG_DEBUG_H_
#include "llvm/Support/raw_ostream.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/lite/debug/debug_options.pb.h"
namespace tensorflow {
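// Configures `pm` from the converter DebugOptions: crash-reproducer
// generation, IR dumping to a directory, and IR printing to `out` are enabled
// according to the options (the tests accompanying this file exercise each of
// these behaviors).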
void InitPassManager(mlir::PassManager& pm,
const converter::DebugOptions& options,
llvm::raw_ostream& out = llvm::outs());
}
#endif
#include <memory>
#include "tensorflow/core/common_runtime/debugger_state_interface.h"
#include "tensorflow/core/debug/debugger_state_impl.h"
namespace tensorflow {
namespace {
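// Registers the debugger-state and debug-graph-decorator factories with their
// registries through a file-scope static object, so registration happens
// during static initialization whenever this translation unit is linked in.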
class DebuggerStateRegistration {
public:
static std::unique_ptr<DebuggerStateInterface> CreateDebuggerState(
const DebugOptions& options) {
return std::unique_ptr<DebuggerStateInterface>(new DebuggerState(options));
}
static std::unique_ptr<DebugGraphDecoratorInterface>
CreateDebugGraphDecorator(const DebugOptions& options) {
return std::unique_ptr<DebugGraphDecoratorInterface>(
new DebugGraphDecorator(options));
}
DebuggerStateRegistration() {
DebuggerStateRegistry::RegisterFactory(CreateDebuggerState);
DebugGraphDecoratorRegistry::RegisterFactory(CreateDebugGraphDecorator);
}
};
static DebuggerStateRegistration register_debugger_state_implementation;
}
} | #include "tensorflow/compiler/mlir/lite/debug/debug.h"
#include <stdint.h>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/lite/debug/debug_options.pb.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace debug_test {
class NopPass : public mlir::PassWrapper<NopPass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(NopPass)
void runOnOperation() override {}
};
class MutatePass : public mlir::PassWrapper<MutatePass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MutatePass)
void runOnOperation() override {
mlir::OpBuilder builder(&getContext());
getOperation()->setAttr("tfl.random_attr", builder.getUnitAttr());
}
};
class AlwaysFailPass
: public mlir::PassWrapper<AlwaysFailPass, mlir::OperationPass<>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AlwaysFailPass)
void runOnOperation() override { signalPassFailure(); }
};
}
namespace {
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using namespace tensorflow::debug_test;
class InitPassManagerTest : public testing::Test {
protected:
InitPassManagerTest()
: path_(GetOutputPath()), context_([]() {
mlir::registerPassManagerCLOptions();
mlir::DialectRegistry registry;
registry.insert<mlir::BuiltinDialect>();
registry.insert<mlir::arith::ArithDialect>();
registry.insert<mlir::func::FuncDialect>();
registry.insert<mlir::TFL::TensorFlowLiteDialect>();
return registry;
}()) {
context_.loadAllAvailableDialects();
mlir::OpBuilder builder(&context_);
module_ = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
builder.setInsertionPointToStart(module_->getBody());
auto func = builder.create<mlir::func::FuncOp>(
builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));
func->setAttr("tfl.func", builder.getUnitAttr());
builder.setInsertionPointToStart(func.addEntryBlock());
llvm::SmallVector<int> shape{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
builder.create<mlir::arith::ConstantOp>(
builder.getUnknownLoc(),
mlir::DenseIntElementsAttr::get(
mlir::RankedTensorType::get(shape.size(), builder.getI32Type()),
shape));
builder.create<mlir::func::ReturnOp>(builder.getUnknownLoc());
}
absl::Status GetDumpDir(std::string* dump_dir) {
std::vector<string> files;
if (auto status = tsl::Env::Default()->GetChildren(path_, &files);
!status.ok()) {
return status;
}
if (files.size() != 1) {
return absl::FailedPreconditionError(
"Expecting directory to have one child.");
}
*dump_dir = tsl::io::JoinPath(path_, files[0]);
return absl::OkStatus();
}
std::string path_;
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
private:
std::string GetOutputPath() {
const auto* const test_info =
testing::UnitTest::GetInstance()->current_test_info();
return tsl::io::JoinPath(
getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
absl::StrCat(test_info->test_suite_name(), ".", test_info->name()));
}
};
TEST_F(InitPassManagerTest, CrashReproducer) {
converter::DebugOptions debug_options;
*debug_options.mutable_ir_dump_dir() = path_;
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options);
pm.addPass(std::make_unique<AlwaysFailPass>());
ASSERT_TRUE(mlir::failed(pm.run(*module_)));
std::string dump_dir;
TF_ASSERT_OK(GetDumpDir(&dump_dir));
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(dump_dir, "tfl_mlir_crash_repro.mlir"), &mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
TEST_F(InitPassManagerTest, DumpToDir) {
converter::DebugOptions debug_options;
*debug_options.mutable_ir_dump_dir() = path_;
*debug_options.mutable_ir_dump_pass_regex() = R"(.*NopPass)";
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options);
pm.addPass(std::make_unique<NopPass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
std::string dump_dir;
TF_ASSERT_OK(GetDumpDir(&dump_dir));
{
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
dump_dir, "00000000.main.tensorflow_debug_test_NopPass_after.mlir"),
&mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
{
std::string mlir_dump;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
dump_dir,
"00000000.main.tensorflow_debug_test_NopPass_before.mlir"),
&mlir_dump));
EXPECT_THAT(mlir_dump, Not(IsEmpty()));
}
}
TEST_F(InitPassManagerTest, PrintIRBeforeEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<NopPass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump Before tensorflow::debug_test::NopPass"));
EXPECT_THAT(captured_out,
Not(HasSubstr("IR Dump After tensorflow::debug_test::NopPass")));
}
TEST_F(InitPassManagerTest, PrintIRAfterEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_after() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump After tensorflow::debug_test::MutatePass"));
EXPECT_THAT(
captured_out,
Not(HasSubstr("IR Dump Before tensorflow::debug_test::MutatePass")));
}
TEST_F(InitPassManagerTest, PrintIRBeforeAndAfterEverything) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
*debug_options.mutable_print_ir_after() = R"(.*)";
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump After tensorflow::debug_test::MutatePass"));
EXPECT_THAT(captured_out,
HasSubstr("IR Dump Before tensorflow::debug_test::MutatePass"));
}
TEST_F(InitPassManagerTest, ElideLargeElementAttrs) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
debug_options.set_elide_elementsattrs_if_larger(5);
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out, HasSubstr("dense_resource<__elided__>"));
}
TEST_F(InitPassManagerTest, DontElideSmallerElementAttrs) {
converter::DebugOptions debug_options;
*debug_options.mutable_print_ir_before() = R"(.*)";
debug_options.set_elide_elementsattrs_if_larger(11);
std::string captured_out;
llvm::raw_string_ostream out(captured_out);
mlir::PassManager pm(&context_);
InitPassManager(pm, debug_options, out);
pm.addPass(std::make_unique<MutatePass>());
ASSERT_TRUE(mlir::succeeded(pm.run(*module_)));
EXPECT_THAT(captured_out,
HasSubstr("dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]>"));
}
}
} |
747 | cpp | tensorflow/tensorflow | is_finite | tensorflow/lite/experimental/shlo/legacy/src/is_finite.cc | tensorflow/lite/experimental/shlo/legacy/test/is_finite_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_IS_FINITE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_IS_FINITE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct IsFiniteOp {
struct Attributes {};
};
IsFiniteOp Create(const IsFiniteOp::Attributes& attributes);
absl::Status Prepare(IsFiniteOp& op, const Tensor& operand, Tensor& result);
absl::Status Evaluate(IsFiniteOp& op, const Tensor& operand, Tensor& result);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/is_finite.h"
#include <algorithm>
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace {
absl::Status CheckParameters(const Tensor& operand, const Tensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError(
"operand and result must have the same shape");
}
if (!operand.IsQuantized() && !IsFloat(operand.tensor_element_type())) {
return absl::InvalidArgumentError(
"operand must be floating-point type or per-tensor quantized");
}
if (operand.IsPerAxisQuantized()) {
return absl::InvalidArgumentError("operand cannot be per-axis quantized");
}
if (result.IsQuantized()) {
return absl::InvalidArgumentError("result cannot be quantized");
}
if (!IsBool(result.tensor_element_type())) {
return absl::InvalidArgumentError("result must be an I1 tensor");
}
if (operand.NumElements() != result.NumElements()) {
return absl::InvalidArgumentError(
"operand and result must have the same size");
}
return absl::OkStatus();
}
template <DataType data_type>
absl::Status EvaluateImpl(const Tensor& operand, bool* output) {
const auto* in = operand.GetDataAs<data_type>();
const auto num_elements = operand.NumElements();
for (DimensionSize i = 0; i < num_elements; ++i) {
output[i] = std::isfinite(static_cast<float>(in[i]));
}
return absl::OkStatus();
}
absl::Status EvaluateImpl(const Tensor& operand, Tensor& result) {
bool* output = result.GetDataAs<DataType::kI1>();
if (!operand.IsQuantized()) {
DISPATCH_FLOAT(EvaluateImpl, operand.tensor_element_type(), operand,
output);
} else {
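// Quantized tensors store finite integer values by construction, so every
// element of the result is true.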
const auto num_elements = result.NumElements();
std::fill(output, output + num_elements, true);
}
return absl::OkStatus();
}
}
IsFiniteOp Create(const IsFiniteOp::Attributes& attributes) {
return IsFiniteOp();
}
absl::Status Prepare(IsFiniteOp& op, const Tensor& operand, Tensor& result) {
return CheckParameters(operand, result);
}
absl::Status Evaluate(IsFiniteOp& op, const Tensor& operand, Tensor& result) {
if (!operand.data) {
return absl::InvalidArgumentError("No operand.data");
}
if (!result.data) {
return absl::InvalidArgumentError("No result.data");
}
return EvaluateImpl(operand, result);
}
} | #include "tensorflow/lite/experimental/shlo/ops/is_finite.h"
#include <cmath>
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
#include "tensorflow/lite/experimental/shlo/tensor_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor_with_data.h"
namespace shlo_ref {
namespace {
using ::shlo_ref::testing::TensorEq;
struct Params {
TensorWithData operand;
TensorWithData expected;
};
class IsFiniteTest : public ::testing::TestWithParam<Params> {};
TEST_P(IsFiniteTest, IsFinite) {
const auto& params = GetParam();
IsFiniteOp op = Create(IsFiniteOp::Attributes{});
Tensor result{.type = params.expected.tensor().type};
ASSERT_OK(Prepare(op, params.operand.tensor(), result));
std::vector<std::byte> result_data(result.SizeInBytes());
result.data = result_data.data();
EXPECT_OK(Evaluate(op, params.operand.tensor(), result));
EXPECT_THAT(result, TensorEq(params.expected.tensor()));
}
INSTANTIATE_TEST_SUITE_P(
Unquantized, IsFiniteTest,
::testing::Values(
Params{TensorWithData::Create<DataType::kBF16>(
Shape{{7}},
{BF16{+NAN}, BF16{-NAN}, BF16{-INFINITY}, BF16{+INFINITY},
BF16{-1.0f}, BF16{0.0f}, BF16{1.0f}}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})},
Params{
TensorWithData::Create<DataType::kF16>(
Shape{{7}}, {F16{+NAN}, F16{-NAN}, F16{-INFINITY},
F16{+INFINITY}, F16{-1.0f}, F16{0.0f}, F16{1.0f}}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})},
Params{
TensorWithData::Create<DataType::kF32>(
Shape{{7}},
{+NAN, -NAN, -INFINITY, +INFINITY, -1.0f, 0.0f, 1.0f}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})}));
INSTANTIATE_TEST_SUITE_P(
Quantized, IsFiniteTest,
::testing::Values(Params{
.operand = TensorWithData::Create<DataType::kSI16, DataType::kF32>(
Shape{{7}}, {0.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}, 0.1f, 0),
.expected = TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {true, true, true, true, true, true, true})}));
}
} |
748 | cpp | tensorflow/tensorflow | compare | tensorflow/lite/experimental/shlo/legacy/src/compare.cc | tensorflow/lite/experimental/shlo/legacy/test/compare_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COMPARE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COMPARE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CompareOp {
enum class ComparisonDirection { kEq, kNe, kGe, kGt, kLe, kLt };
struct Attributes {
ComparisonDirection comparison_direction;
};
Attributes attributes;
};
CompareOp Create(CompareOp::Attributes attributes);
absl::Status Prepare(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/compare.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace {
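// Dequantizes both per-tensor quantized operands element by element and
// applies the comparison functor to the dequantized (expressed-type) values,
// writing boolean results to the output tensor.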
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeCompare(F&& func, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
const DimensionSize num_elements = lhs.NumElements();
const StorageT lhs_zero_point =
lhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT lhs_scale =
lhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT rhs_zero_point =
rhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT rhs_scale =
rhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT* lhs_data = lhs.GetDataAs<storage_type>();
const StorageT* rhs_data = rhs.GetDataAs<storage_type>();
bool* output_data = output.GetDataAs<DataType::kI1>();
for (DimensionSize i = 0; i < num_elements;
++i, ++lhs_data, ++rhs_data, ++output_data) {
const ExpressedT dequantized_lhs =
Dequantize(*lhs_data, lhs_zero_point, lhs_scale);
const ExpressedT dequantized_rhs =
Dequantize(*rhs_data, rhs_zero_point, rhs_scale);
*output_data = func(dequantized_lhs, dequantized_rhs);
}
}
}
CompareOp Create(CompareOp::Attributes attributes) {
return {.attributes = attributes};
}
absl::Status Prepare(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("compare"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("compare"), output, IsBoolTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("compare"), lhs, rhs));
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
return absl::OkStatus();
}
absl::Status Evaluate(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
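// Each case selects the functor for the requested comparison direction and
// then dispatches on the operand element type: bool/int/float tensors go
// through detail::EvaluateNoQuantization, while per-tensor quantized tensors
// are dequantized before comparing.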
#define SHLO_REF_COMPARISON_DIRECTION_CASE(DIRECTION, COMPARISON_OP) \
case CompareOp::ComparisonDirection::DIRECTION: { \
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) { \
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization, \
lhs.tensor_element_type(), COMPARISON_OP, lhs, \
rhs, output); \
} else if (IsQuantizedPerTensorTensor(lhs)) { \
DISPATCH_QUANTIZED( \
DequantizeCompare, \
lhs.quantized_per_tensor_element_type().StorageType(), \
lhs.quantized_per_tensor_element_type().ExpressedType(), \
COMPARISON_OP, lhs, rhs, output) \
} \
break; \
}
switch (op.attributes.comparison_direction) {
SHLO_REF_COMPARISON_DIRECTION_CASE(kEq, std::equal_to<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kNe, std::not_equal_to<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kGe, std::greater_equal<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kGt, std::greater<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kLe, std::less_equal<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kLt, std::less<void>());
}
return absl::FailedPreconditionError(
"stablehlo.compare: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/compare.h"
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CompareOp> {
static std::string Get() { return "Compare"; }
};
struct Compare {
template <class T>
constexpr bool operator()(const T a, const T b) const {
switch (comparison_direction) {
case CompareOp::ComparisonDirection::kEq:
return a == b;
case CompareOp::ComparisonDirection::kNe:
return a != b;
case CompareOp::ComparisonDirection::kGe:
return a >= b;
case CompareOp::ComparisonDirection::kGt:
return a > b;
case CompareOp::ComparisonDirection::kLe:
return a <= b;
case CompareOp::ComparisonDirection::kLt:
return a < b;
}
return false;
}
CompareOp::ComparisonDirection comparison_direction;
};
const char* ToString(CompareOp::ComparisonDirection comparison_direction) {
switch (comparison_direction) {
case CompareOp::ComparisonDirection::kEq:
return "eq";
case CompareOp::ComparisonDirection::kNe:
return "ne";
case CompareOp::ComparisonDirection::kGe:
return "ge";
case CompareOp::ComparisonDirection::kGt:
return "gt";
case CompareOp::ComparisonDirection::kLe:
return "le";
case CompareOp::ComparisonDirection::kLt:
return "lt";
}
}
template <>
struct SupportedOpAttributes<CompareOp> {
static CompareOp::Attributes Get() {
return {.comparison_direction = CompareOp::ComparisonDirection::kEq};
}
};
template <>
struct SupportedOpOutputDataType<CompareOp> {
static constexpr DataType kStorageType = DataType::kI1;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Compare, BinaryElementwiseOpShapePropagationTest,
CompareOp, TestParamNames);
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(
BinaryElementwiseSameBaselineElementTypeConstraintTest);
template <class Op, class SupportedTypes>
using CompareBaselineConstraintTypesCrossProduct =
MapTypes<OpTupleFactory<Op>::template WithOp,
FilterTypes<NegatePred<SameTypes>::template Predicate,
CrossProductTypes<SupportedTypes, SupportedTypes>>>;
using CompareBaselineContraintTypes =
CompareBaselineConstraintTypesCrossProduct<
CompareOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes,
BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
template <class T>
class CompareSameBaselineElementTypeConstraintTest : public ::testing::Test {};
TYPED_TEST_SUITE(CompareSameBaselineElementTypeConstraintTest,
CompareBaselineContraintTypes, TestParamNames);
TYPED_TEST(CompareSameBaselineElementTypeConstraintTest,
DifferentInputOutputStorageTypesRaiseAnError) {
using Op = std::tuple_element_t<0, TypeParam>;
using LhsTypeDesc = std::tuple_element_t<1, TypeParam>;
using RhsTypeDesc = std::tuple_element_t<2, TypeParam>;
const Shape shape({2, 3, 4});
Tensor lhs_tensor{.type = TensorTypeFor(LhsTypeDesc{}, shape),
.data = nullptr};
Tensor rhs_tensor{.type = TensorTypeFor(RhsTypeDesc{}, shape),
.data = nullptr};
Tensor output_tensor{.type = TensorTypeFor(TestParam<DataType::kI1>{}, shape),
.data = nullptr};
auto op = Create(typename Op::Attributes{});
const absl::Status status =
Prepare(op, lhs_tensor, rhs_tensor, output_tensor);
EXPECT_THAT(status, shlo_ref::testing::StatusIs(
absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(
status.message(),
::testing::ContainsRegex(
"stablehlo.[_a-z]+: baseline type constraint is not satisfied"));
}
using UnsupportedTypes =
WithOpTypes<CompareOp, ConcatTypes<PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Compare, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, ArithmeticTestTypes>;
template <class T>
struct CompareTest : ::testing::Test {};
TYPED_TEST_SUITE(CompareTest, SupportedTypes, TestParamNames);
TYPED_TEST(CompareTest, SupportedTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
absl::BitGen bit_gen;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = DataType::kI1},
.data = output_data.data()};
const CompareOp::ComparisonDirection comparison_direction =
static_cast<CompareOp::ComparisonDirection>(absl::Uniform(bit_gen, 0, 6));
Compare compare_ref{comparison_direction};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), compare_ref);
auto op = Create(
CompareOp::Attributes{.comparison_direction = comparison_direction});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedCompareTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCompareTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCompareTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
absl::BitGen bit_gen;
const Shape shape({2, 2, 2});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point + 1,
zero_point + 5);
Vector<StorageType<DataType::kI1>> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = DataType::kI1},
.data = output_data.data()};
const CompareOp::ComparisonDirection comparison_direction =
CompareOp::ComparisonDirection::kEq;
Compare compare_ref{comparison_direction};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale, compare_ref](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
return compare_ref(dequantized_lhs, dequantized_rhs);
});
auto op = Create(
CompareOp::Attributes{.comparison_direction = comparison_direction});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data))
<< "lhs " << ::testing::PrintToString(lhs_data) << "\n"
<< "rhs " << ::testing::PrintToString(rhs_data) << "\n"
<< "dir " << ToString(comparison_direction) << "\n";
}
}
} |
749 | cpp | tensorflow/tensorflow | select | tensorflow/lite/experimental/shlo/legacy/src/select.cc | third_party/xla/xla/tests/select_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_
#include <cmath>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename D, typename T>
void Select(const RuntimeShape& input_condition_shape,
const D* input_condition_data, const RuntimeShape& input_x_shape,
const T* input_x_data, const RuntimeShape& input_y_shape,
const T* input_y_data, const RuntimeShape& output_shape,
T* output_data) {
ruy::profiler::ScopeLabel label("Select");
int64_t flatsize;
if (input_condition_shape.FlatSize() == 1 && input_x_shape.FlatSize() == 1 &&
input_y_shape.FlatSize() == 1 && output_shape.FlatSize() == 1) {
flatsize = 1;
} else {
flatsize = MatchingFlatSize(input_condition_shape, input_x_shape,
input_y_shape, output_shape);
}
for (int64_t i = 0; i < flatsize; ++i) {
output_data[i] =
input_condition_data[i] ? input_x_data[i] : input_y_data[i];
}
}
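// RankOneSelect: the condition is a scalar or a rank-1 tensor broadcast over
// the inner dimensions, so each condition element picks an entire inner slice
// from either x or y, which is then copied with memcpy.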
template <typename D, typename T>
void RankOneSelect(const RuntimeShape& input_condition_shape,
const D* input_condition_data,
const RuntimeShape& input_x_shape, const T* input_x_data,
const RuntimeShape& input_y_shape, const T* input_y_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("Select/RankOneSelect");
const int64_t outer_size = input_condition_shape.FlatSize();
int64_t inner_size;
if (input_condition_shape.DimensionsCount() == 0) {
inner_size = MatchingFlatSize(input_x_shape, input_y_shape, output_shape);
} else {
TFLITE_DCHECK_EQ(
MatchingDim(input_x_shape, 0, input_y_shape, 0, output_shape, 0),
outer_size);
inner_size =
MatchingFlatSizeSkipDim(input_x_shape, 0, input_y_shape, output_shape);
}
int64_t offset = 0;
for (int64_t i = 0; i < outer_size; i++) {
const T* input_data = input_condition_data[i] ? input_x_data : input_y_data;
memcpy(output_data + offset, input_data + offset, inner_size * sizeof(T));
offset += inner_size;
}
}
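// BroadcastSelect5DSlow: generic (slow) path that broadcasts condition, x and
// y up to 5 dimensions using NdArrayDesc strides and selects element by
// element.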
template <typename D, typename T>
void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape,
const D* input_condition_data,
const RuntimeShape& input_x_shape,
const T* input_x_data,
const RuntimeShape& input_y_shape,
const T* input_y_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("Select/BroadcastSelectSlow");
TFLITE_DCHECK_LE(input_condition_shape.DimensionsCount(), 5);
TFLITE_DCHECK_LE(input_x_shape.DimensionsCount(), 5);
TFLITE_DCHECK_LE(input_y_shape.DimensionsCount(), 5);
TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 5);
NdArrayDesc<5> desc_condition;
NdArrayDesc<5> desc_x;
NdArrayDesc<5> desc_y;
NdArrayDesc<5> desc_output;
const RuntimeShape extended_output_shape =
RuntimeShape::ExtendedShape(5, output_shape);
CopyDimsToDesc(extended_output_shape, &desc_output);
NdArrayDescsForElementwiseBroadcast(input_condition_shape, input_x_shape,
input_y_shape, &desc_condition, &desc_x,
&desc_y);
for (int n = 0; n < desc_output.extents[0]; ++n) {
int out_idx_n = desc_output.extents[1] * n;
int cond_idx_n = desc_condition.strides[0] * n;
int in_idx1_n = desc_x.strides[0] * n;
int in_idx2_n = desc_y.strides[0] * n;
for (int b = 0; b < desc_output.extents[1]; ++b) {
int out_idx_b = (out_idx_n + b) * desc_output.extents[2];
int cond_idx_b = cond_idx_n + desc_condition.strides[1] * b;
int in_idx1_b = in_idx1_n + desc_x.strides[1] * b;
int in_idx2_b = in_idx2_n + desc_y.strides[1] * b;
for (int y = 0; y < desc_output.extents[2]; ++y) {
int out_idx_y = (out_idx_b + y) * desc_output.extents[3];
int cond_idx_y = cond_idx_b + desc_condition.strides[2] * y;
int in_idx1_y = in_idx1_b + desc_x.strides[2] * y;
int in_idx2_y = in_idx2_b + desc_y.strides[2] * y;
for (int x = 0; x < desc_output.extents[3]; ++x) {
int out_idx = (out_idx_y + x) * desc_output.extents[4];
int cond_idx = cond_idx_y + desc_condition.strides[3] * x;
int in_idx1 = in_idx1_y + desc_x.strides[3] * x;
int in_idx2 = in_idx2_y + desc_y.strides[3] * x;
for (int c = 0; c < desc_output.extents[4]; ++c) {
output_data[out_idx] = input_condition_data[cond_idx]
? input_x_data[in_idx1]
: input_y_data[in_idx2];
out_idx++;
cond_idx += desc_condition.strides[4];
in_idx1 += desc_x.strides[4];
in_idx2 += desc_y.strides[4];
}
}
}
}
}
}
}
}
#endif
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace select {
constexpr int kInputTensorCondition = 0;
constexpr int kInputTensorX = 1;
constexpr int kInputTensorY = 2;
constexpr int kOutputTensor = 0;
enum KernelType {
kVersionOne,
kVersionTwo,
};
struct OpData {
bool requires_broadcast;
bool has_low_rank_input_condition;
};
void* SelectInit(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
data->has_low_rank_input_condition = false;
return data;
}
void SelectFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
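// Validates the inputs (the condition must be bool; x and y must share a
// type) and resizes the output. When the shapes differ, kVersionOne (SELECT)
// only accepts a scalar or rank-1 condition broadcast over the first
// dimension, while kVersionTwo (SELECT_V2) computes a fully broadcast output
// shape.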
template <KernelType kernel_type>
TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_condition;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorCondition,
&input_condition));
const TfLiteTensor* input_x;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorX, &input_x));
const TfLiteTensor* input_y;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorY, &input_y));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input_condition->type, kTfLiteBool);
TF_LITE_ENSURE_TYPES_EQ(context, input_x->type, input_y->type);
output->type = input_x->type;
if (GetTensorShape(input_condition).FlatSize() == 1 &&
GetTensorShape(input_x).FlatSize() == 1 &&
GetTensorShape(input_y).FlatSize() == 1 &&
GetTensorShape(output).FlatSize() == 1) {
return context->ResizeTensor(context, output, output->dims);
}
bool same_shape = HaveSameShapes(input_condition, input_x) &&
HaveSameShapes(input_x, input_y);
TfLiteIntArray* output_size;
if (!same_shape) {
switch (kernel_type) {
case kVersionOne: {
bool is_input_condition_scalar = NumDimensions(input_condition) == 0;
bool has_rank_one_input_condition =
NumDimensions(input_condition) == 1 &&
SizeOfDimension(input_condition, 0) == SizeOfDimension(input_x, 0);
data->has_low_rank_input_condition =
is_input_condition_scalar || has_rank_one_input_condition;
TF_LITE_ENSURE(context, data->has_low_rank_input_condition);
output_size = TfLiteIntArrayCopy(input_x->dims);
TF_LITE_ENSURE(context, HaveSameShapes(input_x, input_y));
break;
}
case kVersionTwo: {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input_condition, input_x,
input_y, &output_size));
data->requires_broadcast = true;
break;
}
default:
return kTfLiteError;
}
} else {
output_size = TfLiteIntArrayCopy(input_x->dims);
}
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input_condition;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorCondition,
&input_condition));
const TfLiteTensor* input_x;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorX, &input_x));
const TfLiteTensor* input_y;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorY, &input_y));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
#define TF_LITE_SELECT(type, op) \
reference_ops::op(GetTensorShape(input_condition), \
GetTensorData<bool>(input_condition), \
GetTensorShape(input_x), GetTensorData<type>(input_x), \
GetTensorShape(input_y), GetTensorData<type>(input_y), \
GetTensorShape(output), GetTensorData<type>(output));
#define TF_LITE_SWITCH(type, op) \
switch (type) { \
break; \
case kTfLiteBool: \
TF_LITE_SELECT(bool, op); \
break; \
case kTfLiteFloat32: \
TF_LITE_SELECT(float, op); \
break; \
case kTfLiteUInt8: \
TF_LITE_SELECT(uint8_t, op); \
break; \
case kTfLiteInt8: \
TF_LITE_SELECT(int8_t, op); \
break; \
case kTfLiteUInt32: \
TF_LITE_SELECT(uint32_t, op); \
break; \
case kTfLiteInt16: \
TF_LITE_SELECT(int16_t, op); \
break; \
case kTfLiteInt32: \
TF_LITE_SELECT(int32_t, op); \
break; \
case kTfLiteInt64: \
TF_LITE_SELECT(int64_t, op); \
break; \
default: \
TF_LITE_KERNEL_LOG(context, \
"Does not support type other than bool|float|int, " \
"got %d", \
type); \
return kTfLiteError; \
}
if (data->has_low_rank_input_condition) {
TF_LITE_SWITCH(input_x->type, RankOneSelect);
} else if (data->requires_broadcast) {
TF_LITE_SWITCH(input_x->type, BroadcastSelect5DSlow);
} else {
TF_LITE_SWITCH(input_x->type, Select);
}
#undef TF_LITE_SELECT
#undef TF_LITE_SWITCH
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SELECT() {
static TfLiteRegistration r = {select::SelectInit, select::SelectFree,
select::SelectPrepare<select::kVersionOne>,
select::SelectEval};
return &r;
}
TfLiteRegistration* Register_SELECT_V2() {
static TfLiteRegistration r = {select::SelectInit, select::SelectFree,
select::SelectPrepare<select::kVersionTwo>,
select::SelectEval};
return &r;
}
}
}
} | #include <memory>
#include <vector>
#include "xla/client/global_data.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class SelectTest : public ClientLibraryTestBase {
public:
ErrorSpec error_spec_{0.0001};
};
TEST_F(SelectTest, SelectScalarF32True) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, true);
auto on_true = ConstantR0<float>(&builder, 123.0f);
auto on_false = ConstantR0<float>(&builder, 42.0f);
Select(pred, on_true, on_false);
ComputeAndCompareR0<float>(&builder, 123.0f, {}, error_spec_);
}
TEST_F(SelectTest, SelectScalarS32True) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, true);
auto on_true = ConstantR0<int32_t>(&builder, -42);
auto on_false = ConstantR0<int32_t>(&builder, 42);
Select(pred, on_true, on_false);
ComputeAndCompareR0<int32_t>(&builder, -42, {});
}
TEST_F(SelectTest, SelectScalarF32False) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, false);
auto on_true = ConstantR0<float>(&builder, 123.0f);
auto on_false = ConstantR0<float>(&builder, 42.0f);
Select(pred, on_true, on_false);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, error_spec_);
}
XLA_TEST_F(SelectTest, SelectR1S0F32WithConstantR1S0PRED) {
XlaBuilder builder(TestName());
auto pred = ConstantR1<bool>(&builder, {});
auto on_true = ConstantR1<float>(&builder, {});
auto on_false = ConstantR1<float>(&builder, {});
Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithConstantR1PRED) {
XlaBuilder builder(TestName());
auto pred = ConstantR1<bool>(&builder, {false, true, false, true, false});
auto on_true =
ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
auto on_false =
ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 25.5f, 1.0f, -10.0f, -6.0f}, {},
error_spec_);
}
XLA_TEST_F(SelectTest, SelectR1S0F32WithCmpR1S0S32s) {
XlaBuilder builder(TestName());
auto v1 = ConstantR1<int32_t>(&builder, {});
auto v2 = ConstantR1<int32_t>(&builder, {});
auto cmp = Eq(v1, v2);
auto on_true = ConstantR1<float>(&builder, {});
auto on_false = ConstantR1<float>(&builder, {});
Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1S32s) {
XlaBuilder builder(TestName());
auto v1 = ConstantR1<int32_t>(&builder, {1, 2, 3, 4, 5});
auto v2 = ConstantR1<int32_t>(&builder, {9, 2, 9, 4, 9});
auto cmp = Eq(v1, v2);
auto on_true =
ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
auto on_false =
ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 25.5f, 1.0f, -10.0f, -6.0f}, {},
error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1F32s) {
XlaBuilder builder(TestName());
auto v1 = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f});
auto v2 = ConstantR1<float>(&builder, {-1.0f, -2.0f, 13.0f, 14.0f, 4.4f});
auto cmp = Gt(v1, v2);
auto on_true =
ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
auto on_false =
ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-2.5f, 25.5f, 1.0f, 10.0f, 6.0f}, {},
error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1F32sFromParamsSmall) {
XlaBuilder builder(TestName());
XlaOp v1, v2;
std::unique_ptr<GlobalData> param0_data = CreateR1Parameter<float>(
{41.0f, 2.0f, 3.0f, 84.0f}, 0, "v1",
&builder, &v1);
std::unique_ptr<GlobalData> param1_data = CreateR1Parameter<float>(
{21.0f, 22.0f, 23.0f, 24.0f}, 1, "v2",
&builder, &v2);
auto cmp = Gt(v1, v2);
Select(cmp, v1, v2);
ComputeAndCompareR1<float>(&builder, {41.0f, 22.0f, 23.0f, 84.0f},
{param0_data.get(), param1_data.get()},
error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1F32sFromParamsLarge) {
XlaBuilder builder(TestName());
constexpr int datalen = 15 * 1000;
std::vector<float> v1vec;
std::vector<float> v2vec;
std::vector<float> expected_vec;
v1vec.reserve(datalen);
v2vec.reserve(datalen);
expected_vec.reserve(datalen);
for (int i = 0; i < datalen; ++i) {
float smaller = i;
float larger = i * 2;
if (i < datalen / 3) {
v1vec.push_back(larger);
v2vec.push_back(smaller);
} else {
v1vec.push_back(smaller);
v2vec.push_back(larger);
}
expected_vec.push_back(larger);
}
XlaOp v1, v2;
std::unique_ptr<GlobalData> param0_data =
CreateR1Parameter<float>(v1vec, 0, "v1",
&builder, &v1);
std::unique_ptr<GlobalData> param1_data =
CreateR1Parameter<float>(v2vec, 1, "v2",
&builder, &v2);
auto cmp = Gt(v1, v2);
Select(cmp, v1, v2);
ComputeAndCompareR1<float>(&builder, expected_vec,
{param0_data.get(), param1_data.get()},
error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1S32ToScalar) {
XlaBuilder builder(TestName());
auto v = ConstantR1<int32_t>(&builder, {1, -1, 2, -2});
auto s = ConstantR0<int32_t>(&builder, 0);
auto cmp = Gt(v, s);
auto on_true = ConstantR1<float>(&builder, {11.0f, 22.0f, 33.0f, 44.0f});
auto on_false =
ConstantR1<float>(&builder, {-111.0f, -222.0f, -333.0f, -444.0f});
Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {11.0f, -222.0f, 33.0f, -444.0f}, {},
error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithCmpR1F32ToScalar) {
XlaBuilder builder(TestName());
auto v = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f});
auto s = ConstantR0<float>(&builder, 2.5f);
auto cmp = Gt(v, s);
auto on_true = ConstantR1<float>(&builder, {11.0f, 22.0f, 33.0f, 44.0f});
auto on_false =
ConstantR1<float>(&builder, {-111.0f, -222.0f, -333.0f, -444.0f});
Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-111.0f, -222.0f, 33.0f, 44.0f}, {},
error_spec_);
}
XLA_TEST_F(SelectTest, SelectR1S0F32WithScalarPredicate) {
for (bool which : {false, true}) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, which);
auto on_true = ConstantR1<float>(&builder, {});
auto on_false = ConstantR1<float>(&builder, {});
Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
}
TEST_F(SelectTest, SelectR1F32WithScalarPredicateTrue) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, true);
auto on_true = ConstantR1<float>(&builder, {-2.5f, 25.5f});
auto on_false = ConstantR1<float>(&builder, {10.0f, 5.0f});
Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-2.5f, 25.5f}, {}, error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithScalarPredicateFalse) {
XlaBuilder builder(TestName());
auto pred = ConstantR0<bool>(&builder, false);
auto on_true = ConstantR1<float>(&builder, {-2.5f, 25.5f});
auto on_false = ConstantR1<float>(&builder, {10.0f, 5.0f});
Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 5.0f}, {}, error_spec_);
}
}
} |
750 | cpp | tensorflow/tensorflow | concatenate | third_party/xla/xla/service/gpu/fusions/legacy/concatenate.cc | third_party/xla/xla/service/gpu/fusions/legacy/concatenate_test.cc | #ifndef XLA_SERVICE_GPU_FUSIONS_CONCATENATE_H_
#define XLA_SERVICE_GPU_FUSIONS_CONCATENATE_H_
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
const Shape& GetLargestConcatOperandShape(const HloFusionAnalysis& analysis);
class ConcatenateFusion : public KernelFusionEmitterBase {
public:
explicit ConcatenateFusion(const HloFusionAnalysis& analysis);
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
private:
const HloFusionAnalysis& analysis_;
};
}
}
#endif
#include "xla/service/gpu/fusions/concatenate.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
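// Returns the shape of the operand that is largest along the concatenation
// dimension; the launch grid and the default thread-to-input indexing are both
// derived from this shape.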
const Shape& GetLargestConcatOperandShape(const HloFusionAnalysis& analysis) {
const HloInstruction& concat = analysis.fusion_hero(0).instruction();
int64_t dim = concat.concatenate_dimension();
auto less = [&](const HloInstruction* lhs, const HloInstruction* rhs) {
return lhs->shape().dimensions(dim) < rhs->shape().dimensions(dim);
};
HloInstruction* operand = *absl::c_max_element(concat.operands(), less);
return operand->shape();
}
ConcatenateFusion::ConcatenateFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis) {}
std::optional<IndexingMap> ConcatenateFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const {
return std::nullopt;
}
std::optional<IndexingMap> ConcatenateFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
return GetDefaultThreadIdIndexingMap(launch_dimensions(), 1,
GetLargestConcatOperandShape(analysis_),
ctx);
}
absl::Status ConcatenateFusion::EmitKernel(
IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const {
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fusion.fused_parameters().size(); i++) {
fused_emitter.BindGenerator(
*fusion.fused_parameter(i), [&, i](llvm_ir::IrArray::Index index) {
return inputs[i].EmitReadArrayElement(index, builder);
});
}
llvm::Type* index_type =
GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder);
const HloInstruction& concat = analysis_.fusion_hero(0).instruction();
int64_t concat_dim = concat.concatenate_dimension();
int64_t operand_offset = 0;
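  // Emit one parallel loop per concatenated operand. Each loop reads the operand
  // and writes it into every fusion root, shifted by the running offset along the
  // concatenation dimension.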
for (const HloInstruction* operand : concat.operands()) {
llvm_ir::BodyEmitter body_emitter =
[&](const llvm_ir::IrArray::Index& operand_index) -> absl::Status {
TF_ASSIGN_OR_RETURN(auto operand_generator,
fused_emitter.GetGenerator(*operand));
fused_emitter.BindGenerator(concat, [&](llvm_ir::IrArray::Index) {
return operand_generator(operand_index);
});
llvm_ir::IrArray::Index result_index = operand_index.AddOffsetToDim(
llvm::ConstantInt::get(index_type, operand_offset), concat_dim,
builder);
operand_offset += operand->shape().dimensions(concat_dim);
for (const auto& [output, root] :
llvm::zip_equal(outputs, analysis_.fusion_roots())) {
llvm_ir::IrArray::Index root_index = result_index.SourceIndexOfBitcast(
concat.shape(), root.shape(), builder);
TF_ASSIGN_OR_RETURN(auto generator,
fused_emitter.GetGenerator(root.instruction()));
TF_ASSIGN_OR_RETURN(llvm::Value * value, generator(root_index));
output.EmitWriteArrayElement(root_index, value, builder);
}
return absl::OkStatus();
};
ParallelLoopEmitter emitter(body_emitter, operand->shape(), launch_dims,
builder);
TF_RETURN_IF_ERROR(emitter.EmitLoop(fusion.name(), index_type));
}
return absl::OkStatus();
}
LaunchDimensions ConcatenateFusion::launch_dimensions() const {
return CalculateLaunchDimensions(GetLargestConcatOperandShape(analysis_),
analysis_.device_info());
}
}
} | #include "xla/service/gpu/fusions/concatenate.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class ConcatenateTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
printer_ =
AffineMapPrinter({"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"});
}
protected:
AffineMapPrinter printer_;
mlir::MLIRContext mlir_context_;
};
TEST_F(ConcatenateTest, ThreadIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT concat = f32[900] concatenate(param0, param1, param2), dimensions={0}
}
ENTRY main {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT fusion = f32[900] fusion(param0, param1, param2),
calls=fused_computation, kind=kLoop
}
)")
.value();
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = AnalyzeFusion(*root, device_info);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<ConcatenateFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
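  // Every operand uses the same thread-to-input map: the linearized thread id
  // modulo 400 (the size of the largest operand), constrained to stay below 400.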
constexpr auto kIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
(bl_x * 128 + th_x) mod 400)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 4)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 400)
)";
EXPECT_THAT(
fusion
->ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_)
->ToString(printer_),
MatchIndexingString(kIndexing));
EXPECT_THAT(
fusion
->ComputeThreadIdToInputIndexing(
0, 1, &mlir_context_)
->ToString(printer_),
MatchIndexingString(kIndexing));
EXPECT_THAT(
fusion
->ComputeThreadIdToInputIndexing(
0, 2, &mlir_context_)
->ToString(printer_),
MatchIndexingString(kIndexing));
}
}
}
} |
751 | cpp | tensorflow/tensorflow | logistic | tensorflow/lite/experimental/shlo/ops/logistic.cc | tensorflow/lite/experimental/shlo/ops/logistic_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
inline void Logistic(int32_t input_zero_point, int32_t input_range_radius,
int32_t input_multiplier, int32_t input_left_shift,
int32_t input_size, const int8_t* input_data,
int8_t* output_data) {
static constexpr int32_t kInputIntegerBits = 4;
static constexpr int32_t kOutputIntegerBits = 8;
static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
static constexpr int32_t kOutputZeroPoint = -128;
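  // Inputs outside the range radius saturate to the int8 limits; the rest are
  // rescaled to Q4.27 fixed point, passed through gemmlowp's fixed-point logistic,
  // and requantized to int8 with zero point -128.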
for (int i = 0; i < input_size; ++i) {
const int32_t input =
static_cast<int32_t>(input_data[i]) - input_zero_point;
if (input <= -input_range_radius) {
output_data[i] = kMinInt8;
} else if (input >= input_range_radius) {
output_data[i] = kMaxInt8;
} else {
const int32_t input_in_q4 = MultiplyByQuantizedMultiplier(
input, input_multiplier, input_left_shift);
using FixedPoint4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
const int32_t output_in_q0 =
gemmlowp::logistic(FixedPoint4::FromRaw(input_in_q4)).raw();
using gemmlowp::RoundingDivideByPOT;
int32_t output_in_q23 =
RoundingDivideByPOT(output_in_q0, 31 - kOutputIntegerBits);
output_in_q23 = std::min(std::max(output_in_q23 + kOutputZeroPoint,
static_cast<int32_t>(kMinInt8)),
static_cast<int32_t>(kMaxInt8));
output_data[i] = static_cast<int8_t>(output_in_q23);
}
}
}
inline void Logistic(int32_t input_multiplier, int32_t input_left_shift,
int32_t input_size, const int16_t* ptr_input_data,
int16_t* ptr_output_data) {
TFLITE_DCHECK_GE(input_left_shift, 0);
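  // A zero multiplier marks the power-of-two-scale case: fall back to multiplying
  // by 3 << input_left_shift with no further shift.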
if (input_multiplier == 0) {
input_multiplier = 3 << input_left_shift;
input_left_shift = 0;
}
int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0;
for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) {
int32_t input_data =
((*ptr_input_data) * input_multiplier + round) >> input_left_shift;
uint32_t abs_input_data = abs(input_data);
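    // Evaluate sigmoid(|x|) with a uint16 lookup table: the high bits select two
    // adjacent table entries and the low 9 bits interpolate between them; negative
    // inputs are mapped through 1 - sigmoid(|x|) below.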
uint32_t uh = abs_input_data >> 9;
uint32_t result;
if (uh >= 255) {
result = 0x7FFF << 10;
} else {
uint32_t ua = sigmoid_table_uint16[uh];
uint32_t ub = sigmoid_table_uint16[uh + 1];
uint32_t ut = abs_input_data & 0x1ff;
result = (ua << 9) + ut * (ub - ua);
}
result = (input_data >= 0) ? (result + (1 << 9))
: ((1 << (16 + 9)) - result + (1 << 9) - 1);
result >>= 10;
*ptr_output_data = result;
}
}
}
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/logistic.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Logistic {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
return one / (one + std::exp(-v));
}
};
template <>
F16 Logistic::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 Logistic::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
LogisticOp Create(LogisticOp::Attributes) { return {}; }
absl::Status Prepare(LogisticOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("logistic"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("logistic"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogisticOp& op, const Tensor& input, Tensor& output) {
Logistic logistic;
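  // Quantized inputs are dequantized, run through the float functor, and
  // requantized; plain float tensors dispatch directly on their element type.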
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), logistic,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
logistic, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.logistic: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Logistic, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).RelativeTolerance(1.0e+4f).Test(
BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
}
} |
752 | cpp | tensorflow/tensorflow | exponential_minus_one | tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc | tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_EXPONENTIAL_MINUS_ONE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_EXPONENTIAL_MINUS_ONE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct ExponentialMinusOneOp {
struct Attributes {};
};
ExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes);
absl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output);
absl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
};
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
ExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("exponential_minus_one"), input,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("exponential_minus_one"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
ExponentialMinusOne exponential_minus_one;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(),
exponential_minus_one, input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential_minus_one, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.exponential_minus_one: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialMinusOneOp> {
static std::string Get() { return "ExponentialMinusOne"; }
};
namespace {
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
} exponential_minus_one_ref;
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne,
UnaryElementwiseOpShapePropagationTest,
ExponentialMinusOneOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
ExponentialMinusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialMinusOneOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialMinusOneOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOneOp,
UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialMinusOneTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialMinusOneTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(),
exponential_minus_one_ref);
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialMinusOneTest, QuantizedTestTypes,
TestParamNames);
TYPED_TEST(QuantizedExponentialMinusOneTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res =
exponential_minus_one_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
753 | cpp | tensorflow/tensorflow | abs | tensorflow/lite/experimental/shlo/ops/abs.cc | tensorflow/lite/experimental/shlo/ops/abs_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_ABS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_ABS_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct AbsOp {
struct Attributes {};
};
AbsOp Create(AbsOp::Attributes);
absl::Status Prepare(AbsOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(AbsOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/abs.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Abs {
template <class T>
T operator()(const T& val) {
return val < static_cast<T>(0) ? static_cast<T>(-val) : val;
}
};
AbsOp Create(typename AbsOp::Attributes) { return AbsOp{}; }
absl::Status Prepare(AbsOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("abs"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("abs"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(AbsOp& op, const Tensor& input, Tensor& output) {
Abs abs;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), abs, input,
output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), abs, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Abs, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ABS,
xnnpack_delegate.get());
}
TEST(Abs, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
}
} |
754 | cpp | tensorflow/tensorflow | negate | tensorflow/lite/experimental/shlo/ops/negate.cc | tensorflow/lite/experimental/shlo/ops/negate_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_NEGATE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_NEGATE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct NegateOp {
struct Attributes {};
};
NegateOp Create(NegateOp::Attributes);
absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(NegateOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Negate : std::negate<void> {};
NegateOp Create(NegateOp::Attributes) { return {}; }
absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("negate"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("negate"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(NegateOp& op, const Tensor& input, Tensor& output) {
Negate negate;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), negate,
input, output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), negate, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.negate: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NegateOp> {
static std::string Get() { return "Negate"; }
};
namespace {
struct Negate : std::negate<void> {
} negate_ref;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseOpShapePropagationTest,
NegateOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Negate, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<NegateOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NegateOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct NegateTest : ::testing::Test {};
TYPED_TEST_SUITE(NegateTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(NegateTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), negate_ref);
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedNegateTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedNegateTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedNegateTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = negate_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
755 | cpp | tensorflow/tensorflow | and | tensorflow/lite/experimental/shlo/ops/and.cc | tensorflow/lite/experimental/shlo/ops/and_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_AND_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_AND_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct AndOp {
struct Attributes {};
};
AndOp Create(AndOp::Attributes);
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
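// Integer tensors use bitwise AND; boolean (i1) tensors use logical AND.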
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
AndOp Create(AndOp::Attributes) { return {}; }
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("and"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
And<DataType::kSI32> and_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
and_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
And<DataType::kI1> and_func;
detail::EvaluateNoQuantization<DataType::kI1>(and_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.and: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<AndOp> {
static std::string Get() { return "And"; }
};
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
template <>
struct SupportedOpDataType<AndOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseOpShapePropagationTest,
AndOp, TestParamNames);
using AndBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
AndOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
And, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    AndBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<AndOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct AndTest : ::testing::Test {};
TYPED_TEST_SUITE(AndTest, SupportedTypes, TestParamNames);
TYPED_TEST(AndTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
And<TypeParam::kStorage>());
auto op = Create(AndOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
756 | cpp | tensorflow/tensorflow | divide | tensorflow/lite/experimental/shlo/ops/divide.cc | tensorflow/lite/experimental/shlo/ops/divide_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_DIVIDE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_DIVIDE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct DivideOp {
struct Attributes {};
};
DivideOp Create(DivideOp::Attributes);
absl::Status Prepare(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Divide : std::divides<void> {};
DivideOp Create(DivideOp::Attributes) { return {}; }
absl::Status Prepare(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("divide"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Divide divide;
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), divide, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
divide, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.divide: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<DivideOp> {
static std::string Get() { return "Divide"; }
};
struct Divide : std::divides<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseOpShapePropagationTest,
DivideOp, TestParamNames);
using DivideBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
DivideOp,
ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Divide, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    DivideBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<DivideOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct DivideTest : ::testing::Test {};
TYPED_TEST_SUITE(DivideTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(DivideTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Divide());
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedDivideTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedDivideTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedDivideTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
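  // Keep rhs strictly above its zero point so the dequantized divisor is never zero.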
Vector<StorageT> rhs_data = RandomBuffer<TypeParam::kStorage>(
shape, zero_point + 1, zero_point + 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Divide()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
757 | cpp | tensorflow/tensorflow | not | tensorflow/lite/experimental/shlo/ops/not.cc | tensorflow/lite/experimental/shlo/ops/not_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_NOT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_NOT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct NotOp {
struct Attributes {};
};
NotOp Create(NotOp::Attributes);
absl::Status Prepare(NotOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(NotOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/not.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Not {
template <class T>
T operator()(T v) const {
return static_cast<T>(~v);
}
};
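// Booleans use logical NOT instead of the bitwise complement defined above.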
template <>
bool Not::operator()(bool v) const {
return !v;
}
NotOp Create(NotOp::Attributes) { return {}; }
absl::Status Prepare(NotOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("not"), input, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("not"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(NotOp& op, const Tensor& input, Tensor& output) {
Not not_func;
if (IsIntTensor(input) || IsBoolTensor(input)) {
DISPATCH_BOOL_INT(detail::EvaluateNoQuantization,
input.tensor_element_type(), not_func, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.not: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/not.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NotOp> {
static std::string Get() { return "Not"; }
};
template <>
struct SupportedOpDataType<NotOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct Not {
template <class T>
T operator()(T v) const {
return ~v;
}
} not_ref;
template <>
bool Not::operator()(bool v) const {
return !v;
}
INSTANTIATE_TYPED_TEST_SUITE_P(Not, UnaryElementwiseOpShapePropagationTest,
NotOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Not, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<NotOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NotOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Not, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct BoolAndIntNotTest : ::testing::Test {};
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
TYPED_TEST_SUITE(BoolAndIntNotTest, SupportedTypes, TestParamNames);
TYPED_TEST(BoolAndIntNotTest, BoolAndIntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), not_ref);
auto op = Create(NotOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} |
758 | cpp | tensorflow/tensorflow | floor | tensorflow/lite/experimental/shlo/ops/floor.cc | tensorflow/lite/experimental/shlo/ops/floor_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void Floor(const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
int offset = i;
output_data[offset] = std::floor(input_data[offset]);
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/floor.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
enum KernelType {
kReference,
kGenericOptimized,
};
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
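  // Floor accepts a single float32 input and produces an output with the same
  // type and shape.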
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
template <KernelType type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (type == kGenericOptimized) {
optimized_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_FLOOR_REF() {
static TfLiteRegistration r = {nullptr,
nullptr, floor::Prepare,
floor::Eval<floor::kReference>};
return &r;
}
TfLiteRegistration* Register_FLOOR() {
static TfLiteRegistration r = {nullptr,
nullptr, floor::Prepare,
floor::Eval<floor::kGenericOptimized>};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Floor, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_FLOOR,
xnnpack_delegate.get());
}
TEST(Floor, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
}
} |
759 | cpp | tensorflow/tensorflow | count_leading_zeros | tensorflow/lite/experimental/shlo/ops/count_leading_zeros.cc | tensorflow/lite/experimental/shlo/ops/count_leading_zeros_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COUNT_LEADING_ZEROS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COUNT_LEADING_ZEROS_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CountLeadingZerosOp {
struct Attributes {};
};
CountLeadingZerosOp Create(CountLeadingZerosOp::Attributes);
absl::Status Prepare(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output);
absl::Status Evaluate(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/count_leading_zeros.h"
#include <cstdint>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CountLeadingZeros {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
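// The 4-bit value is shifted into the high nibble and the low nibble is
// filled with ones, so an 8-bit countl_zero yields the nibble's own count.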
return I4(absl::countl_zero(static_cast<uint8_t>(v << 4 | 0xf)));
} else {
return absl::countl_zero(static_cast<std::make_unsigned_t<T>>(v));
}
}
};
CountLeadingZerosOp Create(CountLeadingZerosOp::Attributes) { return {}; }
absl::Status Prepare(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("count_leading_zeros"), input, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("count_leading_zeros"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output) {
CountLeadingZeros count_leading_zeros;
if (IsIntTensor(input)) {
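// DISPATCH_INT is expected to return from Evaluate for each supported
// integer type; reaching the error below means the element type is
// unsupported.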
DISPATCH_INT(detail::EvaluateNoQuantization, input.tensor_element_type(),
count_leading_zeros, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.count_leading_zeros: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/count_leading_zeros.h"
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/numeric/bits.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CountLeadingZerosOp> {
static std::string Get() { return "CountLeadingZeros"; }
};
template <>
struct SupportedOpDataType<CountLeadingZerosOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct CountLeadingZeros {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::countl_zero(static_cast<uint8_t>(v << 4 | 0xf)));
} else {
return absl::countl_zero(static_cast<std::make_unsigned_t<T>>(v));
}
}
} count_leading_zeros_ref;
template <class T>
struct CountLeadingZerosFunctorTest : ::testing::Test {};
using CountLeadingZerosTypes = ::testing::Types<int32_t, int16_t, int8_t, I4>;
TYPED_TEST_SUITE(CountLeadingZerosFunctorTest, CountLeadingZerosTypes);
TYPED_TEST(CountLeadingZerosFunctorTest, GivesCorrectResults) {
int64_t bit_count = 8 * sizeof(TypeParam);
if constexpr (std::is_same_v<I4, TypeParam>) {
bit_count = 4;
}
EXPECT_EQ(count_leading_zeros_ref(std::numeric_limits<TypeParam>::lowest()),
0);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(-1)), 0);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(0)), bit_count);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(1)), bit_count - 1);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(2)), bit_count - 2);
EXPECT_EQ(count_leading_zeros_ref(std::numeric_limits<TypeParam>::max()), 1);
}
INSTANTIATE_TYPED_TEST_SUITE_P(CountLeadingZeros,
UnaryElementwiseOpShapePropagationTest,
CountLeadingZerosOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
CountLeadingZeros, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<CountLeadingZerosOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<CountLeadingZerosOp, ConcatTypes<BoolTestType, FloatTestTypes,
PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(CountLeadingZeros,
UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CountLeadingZerosTest : ::testing::Test {};
TYPED_TEST_SUITE(CountLeadingZerosTest, IntTestTypes, TestParamNames);
TYPED_TEST(CountLeadingZerosTest, IntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape, -12);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), count_leading_zeros_ref);
auto op = Create(CountLeadingZerosOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} |
760 | cpp | tensorflow/tensorflow | sign | tensorflow/lite/experimental/shlo/ops/sign.cc | tensorflow/lite/experimental/shlo/ops/sign_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SIGN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SIGN_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct SignOp {
struct Attributes {};
};
SignOp Create(SignOp::Attributes);
absl::Status Prepare(SignOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(SignOp& op, const Tensor& input, Tensor& output);
}
#endif
#include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sign {
TfLiteStatus PointwiseUnaryOpPrepare(TfLiteContext* context, TfLiteNode* node) {
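// Sign is elementwise: expect a single input and give the output the same
// type and shape as that input.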
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 1);
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
template <typename Op, typename T>
TfLiteStatus PointwiseUnaryOpDoEval(
TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output) {
const T* data = tflite::GetTensorData<T>(input);
T* data_output = tflite::GetTensorData<T>(output);
const int64_t num_elements = NumElements(input);
for (int64_t i = 0; i < num_elements; ++i) {
data_output[i] = Op::template Eval<T>(data[i]);
}
return TfLiteStatus::kTfLiteOk;
}
template <typename Op>
TfLiteStatus PointwiseUnaryOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(
context,
(PointwiseUnaryOpDoEval<Op, float>(context, input, output)));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(
context,
(PointwiseUnaryOpDoEval<Op, double>(context, input, output)));
break;
case kTfLiteInt32:
TF_LITE_ENSURE_OK(context, (PointwiseUnaryOpDoEval<Op, int32_t>(
context, input, output)));
break;
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported datatype for sign output: %s",
TfLiteTypeGetName(output->type));
return TfLiteStatus::kTfLiteError;
}
}
return TfLiteStatus::kTfLiteOk;
}
struct Sign {
template <typename T>
static T Eval(T x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
};
}
TfLiteRegistration* Register_SIGN() {
static TfLiteRegistration r = {nullptr, nullptr,
sign::PointwiseUnaryOpPrepare,
sign::PointwiseUnaryOpEval<sign::Sign>};
return &r;
}
}
}
} | #include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
template <>
tflite::TensorType GetTTEnum<int32_t>() {
return tflite::TensorType_INT32;
}
class SignModel : public tflite::SingleOpModel {
public:
SignModel(tflite::TensorData x,
tflite::TensorData output) {
x_ = AddInput(x);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SIGN, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(x_)});
}
int x_;
int output_;
template <typename T>
std::vector<T> GetOutput(const std::vector<T>& x) {
PopulateTensor<T>(x_, x);
Invoke();
return ExtractVector<T>(output_);
}
};
template <typename Float>
class SignTestFloat : public ::testing::Test {
public:
using FloatType = Float;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(SignTestFloat, TestTypes);
TYPED_TEST(SignTestFloat, TestScalarFloat) {
using Float = typename TestFixture::FloatType;
tflite::TensorData x = {GetTTEnum<Float>(), {}};
tflite::TensorData output = {GetTTEnum<Float>(), {}};
SignModel m(x, output);
auto got = m.GetOutput<Float>({0.0});
ASSERT_EQ(got.size(), 1);
EXPECT_FLOAT_EQ(got[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({5.0})[0], 1.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({-3.0})[0], -1.0);
}
TYPED_TEST(SignTestFloat, TestBatchFloat) {
using Float = typename TestFixture::FloatType;
tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}};
SignModel m(x, output);
std::vector<Float> x_data = {0.8, -0.7, 0.6, -0.5, 0.4, -0.3, 0.2, 0.0};
auto got = m.GetOutput<Float>(x_data);
EXPECT_EQ(got, std::vector<Float>(
{1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.0}));
}
template <typename Int>
class SignTestInt : public ::testing::Test {
public:
using IntType = Int;
};
using TestTypesInt = ::testing::Types<int32_t>;
TYPED_TEST_SUITE(SignTestInt, TestTypesInt);
TYPED_TEST(SignTestInt, TestScalarInt) {
using Int = typename TestFixture::IntType;
tflite::TensorData x = {GetTTEnum<Int>(), {}};
tflite::TensorData output = {GetTTEnum<Int>(), {}};
SignModel m(x, output);
auto got = m.GetOutput<Int>({0});
ASSERT_EQ(got.size(), 1);
EXPECT_EQ(got[0], 0);
ASSERT_EQ(m.GetOutput<Int>({5})[0], 1);
ASSERT_EQ(m.GetOutput<Int>({-3})[0], -1);
}
TYPED_TEST(SignTestInt, TestBatchInt) {
using Int = typename TestFixture::IntType;
tflite::TensorData x = {GetTTEnum<Int>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Int>(), {4, 2, 1}};
SignModel m(x, output);
EXPECT_EQ(m.GetOutput<Int>({0, -7, 6, -5, 4, -3, 2, 1}),
std::vector<Int>({0, -1, 1, -1, 1, -1, 1, 1}));
}
}
} |
761 | cpp | tensorflow/tensorflow | multiply | tensorflow/lite/experimental/shlo/ops/multiply.cc | tensorflow/lite/experimental/shlo/ops/multiply_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MULTIPLY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MULTIPLY_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct MultiplyOp {
struct Attributes {};
};
MultiplyOp Create(MultiplyOp::Attributes);
absl::Status Prepare(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
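// For boolean tensors, multiplication reduces to logical AND.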
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
MultiplyOp Create(MultiplyOp::Attributes) { return {}; }
absl::Status Prepare(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("multiply"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsBoolTensor(lhs)) {
detail::EvaluateNoQuantization<DataType::kI1>(Multiply<DataType::kI1>(),
lhs, rhs, output);
return absl::OkStatus();
} else if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), multiply, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
multiply, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.multiply: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<MultiplyOp> {
static std::string Get() { return "Multiply"; }
};
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply,
BinaryElementwiseOpShapePropagationTest,
MultiplyOp, TestParamNames);
using MultiplyBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
MultiplyOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes,
BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Multiply, BinaryElementwiseSameBaselineElementTypeConstraintTest,
MultiplyBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes = WithOpTypes<MultiplyOp, PerAxisQuantizedTestTypes>;
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<BoolTestType, ArithmeticTestTypes>;
template <class T>
struct MultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(MultiplyTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(MultiplyTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Multiply<TypeParam::kStorage>());
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedMultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedMultiplyTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedMultiplyTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
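// Expected values: dequantize both operands, multiply in the expressed type,
// then requantize the result.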
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Multiply<TypeParam::kExpressed>()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
762 | cpp | tensorflow/tensorflow | sine | tensorflow/lite/experimental/shlo/ops/sine.cc | tensorflow/lite/experimental/shlo/ops/sine_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SINE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SINE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct SineOp {
struct Attributes {};
};
SineOp Create(SineOp::Attributes);
absl::Status Prepare(SineOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(SineOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/sine.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sine {
template <class T>
T operator()(T v) const {
return std::sin(v);
}
};
template <>
F16 Sine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
SineOp Create(SineOp::Attributes) { return {}; }
absl::Status Prepare(SineOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("sine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SineOp& op, const Tensor& input, Tensor& output) {
Sine sine;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sine, input,
output)
} else if (!input.IsQuantized() && IsFloat(input.StorageType())) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
sine, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/sine.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SineOp> {
static std::string Get() { return "Sine"; }
};
namespace {
struct Sine {
template <class T>
T operator()(T v) const {
return std::sin(v);
}
} sine_ref;
template <>
F16 Sine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Sine, UnaryElementwiseOpShapePropagationTest,
SineOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Sine, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<SineOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
SineOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Sine, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct FloatSineTest : ::testing::Test {};
TYPED_TEST_SUITE(FloatSineTest, FloatTestTypes, TestParamNames);
TYPED_TEST(FloatSineTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const TensorType tensor_type =
TensorType{.shape = shape, .element_type = TypeParam::kStorage};
Tensor input_tensor{.type = tensor_type, .data = input_data.data()};
Tensor output_tensor{.type = tensor_type, .data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), sine_ref);
auto op = Create(SineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedSineTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSineTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSineTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedPerTensorTensorType tensor_type = {
.shape = shape,
.element_type = QuantizedElementTypePerTensor(
TypeParam::kStorage, zero_point, TypeParam::kExpressed, scale)};
Tensor input_tensor{.type = tensor_type, .data = input_data.data()};
Tensor output_tensor{.type = tensor_type, .data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
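// Expected values: dequantize the input, apply sine in the expressed type,
// then requantize the result.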
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = sine_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
763 | cpp | tensorflow/tensorflow | xor | tensorflow/lite/experimental/shlo/ops/xor.cc | tensorflow/lite/experimental/shlo/ops/xor_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_XOR_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_XOR_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct XorOp {
struct Attributes {};
};
XorOp Create(XorOp::Attributes);
absl::Status Prepare(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/xor.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct Xor : std::bit_xor<void> {};
template <>
struct Xor<DataType::kI1> {
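// Boolean XOR: true exactly when the two operands differ.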
template <class T>
bool operator()(T lhs, T rhs) const {
return static_cast<bool>(lhs) != static_cast<bool>(rhs);
}
};
XorOp Create(XorOp::Attributes) { return {}; }
absl::Status Prepare(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("xor"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("xor"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("xor"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
Xor<DataType::kSI32> xor_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
xor_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
Xor<DataType::kI1> xor_func;
detail::EvaluateNoQuantization<DataType::kI1>(xor_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.xor: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/xor.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<XorOp> {
static std::string Get() { return "Xor"; }
};
template <DataType>
struct Xor : std::bit_xor<void> {};
template <>
struct Xor<DataType::kI1> {
template <class T>
bool operator()(T lhs, T rhs) const {
return static_cast<bool>(lhs) != static_cast<bool>(rhs);
}
};
template <>
struct SupportedOpDataType<XorOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Xor, BinaryElementwiseOpShapePropagationTest,
XorOp, TestParamNames);
using XorBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
XorOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Xor, BinaryElementwiseSameBaselineElementTypeConstraintTest,
XorBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<XorOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Xor, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct XorTest : ::testing::Test {};
TYPED_TEST_SUITE(XorTest, SupportedTypes, TestParamNames);
TYPED_TEST(XorTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Xor<TypeParam::kStorage>());
auto op = Create(XorOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
764 | cpp | tensorflow/tensorflow | subtract | tensorflow/lite/experimental/shlo/ops/subtract.cc | tensorflow/lite/experimental/shlo/ops/subtract_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SUBTRACT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SUBTRACT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct SubtractOp {
struct Attributes {};
};
SubtractOp Create(SubtractOp::Attributes);
absl::Status Prepare(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Subtract : std::minus<void> {};
SubtractOp Create(SubtractOp::Attributes) { return {}; }
absl::Status Prepare(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("subtract"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Subtract subtract;
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), subtract, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
subtract, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.subtract: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SubtractOp> {
static std::string Get() { return "Subtract"; }
};
struct Subtract : std::minus<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract,
BinaryElementwiseOpShapePropagationTest,
SubtractOp, TestParamNames);
using SubtractBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
SubtractOp,
ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Subtract, BinaryElementwiseSameBaselineElementTypeConstraintTest,
SubtractBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<SubtractOp,
ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct SubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(SubtractTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(SubtractTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Subtract());
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedSubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSubtractTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSubtractTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
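// Expected values: dequantize both operands, subtract in the expressed type,
// then requantize the result.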
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Subtract()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
765 | cpp | tensorflow/tensorflow | minimum | tensorflow/lite/experimental/shlo/ops/minimum.cc | tensorflow/lite/experimental/shlo/ops/minimum_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MINIMUM_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MINIMUM_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct MinimumOp {
struct Attributes {};
};
MinimumOp Create(MinimumOp::Attributes);
absl::Status Prepare(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/minimum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Minimum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a < b ? a : b;
}
};
MinimumOp Create(MinimumOp::Attributes) { return {}; }
absl::Status Prepare(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("minimum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Minimum minimum;
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), minimum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
minimum, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.minimum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Minimum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
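// The *Weights tests below make one of the inputs a static (constant) tensor
// held as FP16, INT8, channel-wise quantized INT8, or sparse data, exercising
// the delegate's handling of constant inputs.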
TEST(Minimum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
}
} |
766 | cpp | tensorflow/tensorflow | log_plus_one | tensorflow/lite/experimental/shlo/ops/log_plus_one.cc | tensorflow/lite/experimental/shlo/ops/log_plus_one_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_LOG_PLUS_ONE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_LOG_PLUS_ONE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct LogPlusOneOp {
struct Attributes {};
};
LogPlusOneOp Create(LogPlusOneOp::Attributes);
absl::Status Prepare(LogPlusOneOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(LogPlusOneOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/log_plus_one.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
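// Element-wise log(1 + x) functor backed by std::log1p; F16 and BF16 values
// are promoted to float below because std::log1p has no half-precision
// overload.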
struct LogPlusOne {
template <class T>
T operator()(T v) const {
return std::log1p(v);
}
};
template <>
F16 LogPlusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 LogPlusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
LogPlusOneOp Create(LogPlusOneOp::Attributes) { return {}; }
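// Prepare propagates the input shape to the output and verifies that the
// tensors are floating-point or per-tensor quantized with matching baseline
// element types.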
absl::Status Prepare(LogPlusOneOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("log_plus_one"), input,
IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("log_plus_one"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogPlusOneOp& op, const Tensor& input, Tensor& output) {
LogPlusOne log_plus_one;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), log_plus_one,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
log_plus_one, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.log_plus_one: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/log_plus_one.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<LogPlusOneOp> {
static std::string Get() { return "LogPlusOne"; }
};
namespace {
struct LogPlusOne {
template <class T>
T operator()(T v) const {
return std::log1p(v);
}
} log_plus_one_ref;
template <>
F16 LogPlusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 LogPlusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(LogPlusOne,
UnaryElementwiseOpShapePropagationTest,
LogPlusOneOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
LogPlusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<LogPlusOneOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<LogPlusOneOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(LogPlusOne, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct LogPlusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(LogPlusOneTest, FloatTestTypes, TestParamNames);
TYPED_TEST(LogPlusOneTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
  // Keep inputs above -1 so std::log1p stays inside its domain.
  Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(
      shape, /*min=*/static_cast<StorageT>(-0.99));
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), log_plus_one_ref);
auto op = Create(LogPlusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedLogPlusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedLogPlusOneTest, QuantizedTestTypes, TestParamNames);
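// Expected values for per-tensor quantized inputs are obtained by
// dequantizing, applying the reference functor, and re-quantizing with the
// inverse scale.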
TYPED_TEST(QuantizedLogPlusOneTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = log_plus_one_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(LogPlusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
767 | cpp | tensorflow/tensorflow | ceil | tensorflow/lite/experimental/shlo/ops/ceil.cc | tensorflow/lite/experimental/shlo/ops/ceil_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = std::ceil(input_data[i]);
}
}
}
}
#endif
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace ceil {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
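// Prepare checks for a single float32 input/output pair and resizes the
// output tensor to match the input's shape.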
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (input->type != kTfLiteFloat32) {
TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Ceil");
}
optimized_ops::Ceil(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_CEIL() {
  static TfLiteRegistration r = {/*init=*/nullptr,
                                 /*free=*/nullptr, ceil::Prepare, ceil::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Ceil, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_CEIL,
xnnpack_delegate.get());
}
TEST(Ceil, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
}
} |
768 | cpp | tensorflow/tensorflow | sqrt | tensorflow/lite/experimental/shlo/ops/sqrt.cc | tensorflow/lite/experimental/shlo/ops/sqrt_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SQRT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_SQRT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct SqrtOp {
struct Attributes {};
};
SqrtOp Create(SqrtOp::Attributes);
absl::Status Prepare(SqrtOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(SqrtOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/sqrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sqrt {
template <class T>
T operator()(T v) const {
return std::sqrt(v);
}
};
template <>
F16 Sqrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sqrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
SqrtOp Create(SqrtOp::Attributes) { return {}; }
absl::Status Prepare(SqrtOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("sqrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sqrt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SqrtOp& op, const Tensor& input, Tensor& output) {
Sqrt sqrt;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sqrt, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
sqrt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.sqrt: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sqrt, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_SQRT,
xnnpack_delegate.get());
}
TEST(Sqrt, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
}
} |
769 | cpp | tensorflow/tensorflow | maximum | tensorflow/lite/experimental/shlo/ops/maximum.cc | tensorflow/lite/experimental/shlo/ops/maximum_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MAXIMUM_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_MAXIMUM_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct MaximumOp {
struct Attributes {};
};
MaximumOp Create(MaximumOp::Attributes);
absl::Status Prepare(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/maximum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Maximum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a > b ? a : b;
}
};
MaximumOp Create(MaximumOp::Attributes) { return {}; }
absl::Status Prepare(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("maximum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Maximum maximum;
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), maximum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
maximum, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.maximum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Maximum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
}
} |
770 | cpp | tensorflow/tensorflow | popcnt | tensorflow/lite/experimental/shlo/ops/popcnt.cc | tensorflow/lite/experimental/shlo/ops/popcnt_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_POPCNT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_POPCNT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct PopcntOp {
struct Attributes {};
};
PopcntOp Create(PopcntOp::Attributes);
absl::Status Prepare(PopcntOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(PopcntOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
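// Counts set bits in the two's-complement representation; I4 values are
// masked to their low nibble first so that only four bits contribute to the
// count.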
struct Popcnt {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
};
PopcntOp Create(PopcntOp::Attributes) { return {}; }
absl::Status Prepare(PopcntOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("popcnt"), input, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("popcnt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(PopcntOp& op, const Tensor& input, Tensor& output) {
Popcnt popcnt;
if (IsIntTensor(input)) {
DISPATCH_INT(detail::EvaluateNoQuantization, input.tensor_element_type(),
popcnt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.popcnt: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/numeric/bits.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<PopcntOp> {
static std::string Get() { return "Popcnt"; }
};
template <>
struct SupportedOpDataType<PopcntOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct Popcnt {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
} popcnt_ref;
using PopcntTypes = ::testing::Types<int32_t, int16_t, int8_t, I4>;
template <class T>
struct PopcntFunctorTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntFunctorTest, PopcntTypes);
TYPED_TEST(PopcntFunctorTest, GivesCorrectResults) {
int64_t bit_count = 8 * sizeof(TypeParam);
if constexpr (std::is_same_v<I4, TypeParam>) {
bit_count = 4;
}
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::lowest()), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(-1)), bit_count);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(0)), 0);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(1)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(2)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(3)), 2);
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::max()), bit_count - 1);
}
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseOpShapePropagationTest,
PopcntOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Popcnt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<PopcntOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<PopcntOp, ConcatTypes<BoolTestType, FloatTestTypes,
PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct PopcntTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntTest, IntTestTypes, TestParamNames);
TYPED_TEST(PopcntTest, IntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape, -12);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), popcnt_ref);
auto op = Create(PopcntOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} |
771 | cpp | tensorflow/tensorflow | or | tensorflow/lite/experimental/shlo/ops/or.cc | tensorflow/lite/experimental/shlo/ops/or_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_OR_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_OR_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct OrOp {
struct Attributes {};
};
OrOp Create(OrOp::Attributes);
absl::Status Prepare(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
absl::Status Evaluate(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
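// Integer tensors use bitwise OR; boolean (kI1) tensors use logical OR via the
// specialization below.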
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
OrOp Create(OrOp::Attributes) { return {}; }
absl::Status Prepare(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("or"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
Or<DataType::kSI32> or_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
or_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
Or<DataType::kI1> or_func;
detail::EvaluateNoQuantization<DataType::kI1>(or_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.or: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<OrOp> {
static std::string Get() { return "Or"; }
};
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
template <>
struct SupportedOpDataType<OrOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseOpShapePropagationTest,
OrOp, TestParamNames);
using OrBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
    OrOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Or, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    OrBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<OrOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct OrTest : ::testing::Test {};
TYPED_TEST_SUITE(OrTest, SupportedTypes, TestParamNames);
TYPED_TEST(OrTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Or<TypeParam::kStorage>());
auto op = Create(OrOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
772 | cpp | tensorflow/tensorflow | tanh | tensorflow/lite/experimental/shlo/ops/tanh.cc | tensorflow/lite/experimental/shlo/ops/tanh_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
#include <algorithm>
#include <limits>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
inline void Tanh(int32_t input_zero_point, int32_t input_range_radius,
int32_t input_multiplier, int32_t input_shift,
const RuntimeShape& input_shape, const int8_t* input_data,
const RuntimeShape& output_shape, int8_t* output_data) {
static constexpr int32_t kInputIntegerBits = 4;
static constexpr int32_t kOutputScale = 7;
static constexpr int32_t kMinInt8 = std::numeric_limits<int8_t>::min();
static constexpr int32_t kMaxInt8 = std::numeric_limits<int8_t>::max();
using F4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
const int flat_size = MatchingFlatSize(input_shape, output_shape);
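  // Per element: rescale the zero-point-adjusted input into Q4.27 fixed point,
  // evaluate gemmlowp's fixed-point tanh (Q0.31 result), then round down to the
  // Q0.7 output scale and saturate to int8.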
for (int i = 0; i < flat_size; ++i) {
const int32_t input =
static_cast<int32_t>(input_data[i]) - input_zero_point;
if (input <= -input_range_radius) {
output_data[i] = kMinInt8;
} else if (input >= input_range_radius) {
output_data[i] = kMaxInt8;
} else {
const int32_t input_in_q4 =
MultiplyByQuantizedMultiplier(input, input_multiplier, input_shift);
const int32_t output_in_q0 =
gemmlowp::tanh(F4::FromRaw(input_in_q4)).raw();
using gemmlowp::RoundingDivideByPOT;
int32_t output_in_q24 =
RoundingDivideByPOT(output_in_q0, 31 - kOutputScale);
output_in_q24 = std::min(std::max(output_in_q24, kMinInt8), kMaxInt8);
output_data[i] = static_cast<int8_t>(output_in_q24);
}
}
}
inline void Tanh(int32_t input_multiplier, int32_t input_left_shift,
const RuntimeShape& input_shape, const int16_t* ptr_input_data,
const RuntimeShape& output_shape, int16_t* ptr_output_data) {
if (input_multiplier == 0) {
input_multiplier = 3 << input_left_shift;
input_left_shift = 0;
}
int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0;
int flat_size = MatchingFlatSize(input_shape, output_shape);
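  // The int16 path approximates tanh by linearly interpolating the shared
  // uint16 sigmoid lookup table and re-centering the result into the symmetric
  // tanh output range.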
for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) {
int32_t input_data =
((*ptr_input_data) * input_multiplier + round) >> input_left_shift;
uint32_t abs_input_data = abs(input_data);
uint32_t uh = abs_input_data >> 8;
int32_t result;
if (uh >= 255) {
result = 0xFFFF << 8;
} else {
uint32_t ua = sigmoid_table_uint16[uh];
uint32_t ub = sigmoid_table_uint16[uh + 1];
uint8_t ut = abs_input_data & 0xFF;
result = (ua << 8) + ut * (ub - ua);
}
result = (input_data >= 0)
? (result - (1 << (14 + 9)) + (1 << (9 - 2)))
: (-result + (1 << (14 + 9)) + (1 << (9 - 2)) - 1);
result >>= (9 - 1);
*ptr_output_data = result;
}
}
}
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/tanh.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Tanh {
template <class T>
T operator()(T v) const {
return std::tanh(v);
}
};
template <>
F16 Tanh::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Tanh::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
TanhOp Create(TanhOp::Attributes) { return {}; }
absl::Status Prepare(TanhOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("tanh"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("tanh"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(TanhOp& op, const Tensor& input, Tensor& output) {
Tanh tanh;
if (input.IsPerTensorQuantized()) {
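    // Per-tensor quantized inputs are dequantized to the expressed type,
    // evaluated element-wise, and requantized with the same scale and zero
    // point.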
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), tanh, input,
output)
} else if (!input.IsQuantized() && IsFloat(input.StorageType())) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
tanh, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.tanh: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Tanh, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).RelativeTolerance(1.0e+4f).Test(
BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
}
} |
773 | cpp | tensorflow/tensorflow | exponential | tensorflow/lite/experimental/shlo/ops/exponential.cc | tensorflow/lite/experimental/shlo/ops/exponential_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_EXPONENTIAL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_EXPONENTIAL_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct ExponentialOp {
struct Attributes {};
};
ExponentialOp Create(ExponentialOp::Attributes);
absl::Status Prepare(ExponentialOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(ExponentialOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
};
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
ExponentialOp Create(ExponentialOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
      CheckCtx("exponential"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSameBaselineType(CheckCtx("exponential"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialOp& op, const Tensor& input, Tensor& output) {
Exponential exponential;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), exponential,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential, input, output);
}
  return absl::FailedPreconditionError(
      "stablehlo.exponential: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialOp> {
static std::string Get() { return "Exponential"; }
};
namespace {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
} exponential_ref;
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential,
UnaryElementwiseOpShapePropagationTest,
ExponentialOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Exponential, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), exponential_ref);
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedExponentialTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
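  // Reference result: dequantize each input element, apply the exponential,
  // then requantize with the same zero point and scale.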
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = exponential_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
774 | cpp | tensorflow/tensorflow | cbrt | tensorflow/lite/experimental/shlo/ops/cbrt.cc | tensorflow/lite/experimental/shlo/ops/cbrt_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_CBRT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_CBRT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CbrtOp {
struct Attributes {};
};
CbrtOp Create(CbrtOp::Attributes);
absl::Status Prepare(CbrtOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(CbrtOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Cbrt {
template <class T>
T operator()(T v) const {
return std::cbrt(v);
}
};
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CbrtOp Create(CbrtOp::Attributes) { return {}; }
absl::Status Prepare(CbrtOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cbrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cbrt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CbrtOp& op, const Tensor& input, Tensor& output) {
Cbrt cbrt;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), cbrt, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
cbrt, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CbrtOp> {
static std::string Get() { return "Cbrt"; }
};
namespace {
struct Cbrt {
template <class T>
T operator()(T v) const {
return std::cbrt(v);
}
} cbrt_ref;
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseOpShapePropagationTest,
CbrtOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cbrt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CbrtOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
CbrtOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(CbrtTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CbrtTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cbrt_ref);
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCbrtTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCbrtTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cbrt_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
775 | cpp | tensorflow/tensorflow | log | tensorflow/lite/experimental/shlo/ops/log.cc | third_party/xla/xla/tests/log_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_LOG_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_LOG_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct LogOp {
struct Attributes {};
};
LogOp Create(LogOp::Attributes);
absl::Status Prepare(LogOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(LogOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/log.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Log {
template <class T>
T operator()(T v) const {
return std::log(v);
}
};
template <>
F16 Log::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Log::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
LogOp Create(LogOp::Attributes) { return {}; }
absl::Status Prepare(LogOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("log"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("log"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogOp& op, const Tensor& input, Tensor& output) {
Log log;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), log, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
log, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.log: Unsupported tensor type.");
}
}; | #include <cmath>
#include <vector>
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class LogTest : public ClientLibraryTestBase {};
XLA_TEST_F(LogTest, LogZeroValues) {
XlaBuilder builder(TestName());
auto x = ConstantR3FromArray3D<float>(&builder, Array3D<float>(3, 0, 0));
Log(x);
ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 0), {},
ErrorSpec(0.0001));
}
TEST_F(LogTest, LogTenValues) {
std::vector<float> input = {-0.0, 1.0, 2.0, -3.0, -4.0,
5.0, 6.0, -7.0, -8.0, 9.0};
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, input);
Log(x);
std::vector<float> expected;
expected.reserve(input.size());
for (float f : input) {
expected.push_back(std::log(f));
}
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
}
} |
776 | cpp | tensorflow/tensorflow | cosine | tensorflow/lite/experimental/shlo/ops/cosine.cc | tensorflow/lite/experimental/shlo/ops/cosine_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COSINE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_COSINE_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CosineOp {
struct Attributes {};
};
CosineOp Create(CosineOp::Attributes);
absl::Status Prepare(CosineOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(CosineOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
};
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CosineOp Create(CosineOp::Attributes) { return {}; }
absl::Status Prepare(CosineOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cosine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cosine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CosineOp& op, const Tensor& input, Tensor& output) {
Cosine cosine;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), cosine,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
cosine, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CosineOp> {
static std::string Get() { return "Cosine"; }
};
namespace {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
} cosine_ref;
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseOpShapePropagationTest,
CosineOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cosine, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CosineOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<CosineOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CosineTest : ::testing::Test {};
TYPED_TEST_SUITE(CosineTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CosineTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cosine_ref);
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCosineTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCosineTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCosineTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cosine_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
777 | cpp | tensorflow/tensorflow | rgb_to_grayscale | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_RGB_TO_GRAYSCALE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_RGB_TO_GRAYSCALE_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace rgb_to_grayscale {
const algo::Algo* Impl_RgbToGrayscale();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
inline void ConvertRgbToGrayscale(dim_t batches, dim_t height, dim_t width,
const float* input_data, float* output_data) {
const dim_t output_num_pixels = batches * width * height;
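  // Standard luma weights (ITU-R BT.601) for RGB-to-grayscale conversion.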
constexpr float kRgb2GrayscaleKernel[] = {0.2989f, 0.5870f, 0.1140f};
const float* src_ptr = input_data;
float* dst_ptr = output_data;
for (int i = 0; i < output_num_pixels; ++i) {
*dst_ptr = kRgb2GrayscaleKernel[0] * src_ptr[0] +
kRgb2GrayscaleKernel[1] * src_ptr[1] +
kRgb2GrayscaleKernel[2] * src_ptr[2];
src_ptr += 3;
dst_ptr++;
}
}
void ComputeRgbToGrayscale(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, 1});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertRgbToGrayscale(img_num_batches, img_height, img_width, img_data,
output_data);
}
}
const Algo* Impl_RgbToGrayscale() {
static const Algo rgb_to_grayscale = {&ComputeRgbToGrayscale, nullptr};
return &rgb_to_grayscale;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
struct RgbToGrayscaleTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RgbToGrayscaleTest
: public ::testing::TestWithParam<RgbToGrayscaleTestParams> {};
TEST_P(RgbToGrayscaleTest, FloatPixelType) {
const RgbToGrayscaleTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_grayscale = Impl_RgbToGrayscale();
rgb_to_grayscale->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
constexpr float kAbsError = 0.1f;
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RgbToGrayscaleTests, RgbToGrayscaleTest,
testing::ValuesIn({
RgbToGrayscaleTestParams{{1, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f},
{1, 3, 2, 1}},
RgbToGrayscaleTestParams{{2, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232,
51, 311, 411,
52, 312, 412,
61, 321, 421,
62, 322, 422,
71, 331, 431,
72, 332, 432},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f,
244.7f, 245.7f, 254.7f,
255.7f, 264.7f, 265.7f},
{2, 3, 2, 1}},
}));
}
}
} |
778 | cpp | tensorflow/tensorflow | flip_up_down | tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down.cc | tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_FLIP_UP_DOWN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_FLIP_UP_DOWN_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace flip_up_down {
const algo::Algo* Impl_FlipUpDown();
}
}
#endif
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace flip_up_down {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
void FlipUpDown(dim_t batches, dim_t input_height, dim_t input_width,
const char* input_data, char* output_data, dim_t chunk_size) {
const dim_t row_stride = input_width * chunk_size;
const dim_t batch_stride = row_stride * input_height;
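  // For each batch, copy source rows in reverse order so the bottom row of the
  // input becomes the top row of the output.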
for (int b = 0; b < batches; ++b) {
const char* src_data_prt = input_data + b * batch_stride;
char* dst_data_prt = output_data + b * batch_stride;
for (int y = 0; y < input_height; ++y) {
const char* src_ptr_row =
src_data_prt + (input_height - y - 1) * row_stride;
char* dst_ptr_row = dst_data_prt + y * row_stride;
std::memcpy(dst_ptr_row, src_ptr_row, row_stride);
}
}
}
void ComputeFlipUpDown(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const char* img_data = reinterpret_cast<const char*>(img->Data());
const dim_t num_batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t num_channels = img->Dims()[3];
const dim_t chunk_size = TypeWidth(img->Type()) * num_channels;
MutableDataRef* output = outputs[0];
output->Resize({num_batches, height, width, num_channels});
char* output_data = reinterpret_cast<char*>(output->Data());
FlipUpDown(num_batches, height, width, img_data, output_data, chunk_size);
}
}
const Algo* Impl_FlipUpDown() {
static const Algo flip_up_down = {&ComputeFlipUpDown, nullptr};
return &flip_up_down;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace flip_up_down {
namespace {
struct FlipUpDownTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class FlipUpDownTest : public ::testing::TestWithParam<FlipUpDownTestParams> {};
TEST_P(FlipUpDownTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const FlipUpDownTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* flip_up_down = Impl_FlipUpDown();
flip_up_down->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
FlipUpDownTests, FlipUpDownTest,
testing::ValuesIn({
FlipUpDownTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
{31, 32, 33,
21, 22, 23,
11, 12, 13},
{1, 3, 3, 1}},
FlipUpDownTestParams{{1, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{31, 4, 32, 5, 33, 6,
21, 3, 22, 4, 23, 5,
11, 2, 12, 3, 13, 4},
{1, 3, 3, 2}},
FlipUpDownTestParams{{2, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6,
13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{31, 4, 32, 5, 33, 6,
21, 3, 22, 4, 23, 5,
11, 2, 12, 3, 13, 4,
33, 6, 32, 5, 31, 4,
23, 5, 22, 4, 21, 3,
13, 4, 12, 3, 11, 2},
{2, 3, 3, 2}},
}));
}
}
} |
779 | cpp | tensorflow/tensorflow | rotate | tensorflow/lite/experimental/ml_adjacent/algo/rotate.cc | tensorflow/lite/experimental/ml_adjacent/algo/rotate_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_ROTATE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_ROTATE_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace rotate {
const algo::Algo* Impl_Rotate();
}
}
#endif
#include <cmath>
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rotate {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
inline float DegreesToRadians(int angle) { return angle * M_PI / 180; }
void ComputeNewSize(dim_t src_width, dim_t src_height, int angle,
dim_t& dst_width, dim_t& dst_height) {
dst_width = src_width;
dst_height = src_height;
if (angle % 90 == 0) {
if (angle == 90 || angle == 270) {
dst_width = src_height;
dst_height = src_width;
}
} else {
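    // For arbitrary angles, rotate the image's corner points and grow the
    // output size so the rotated content still fits.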
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
const int edge_x = src_width / 2;
const int edge_y = src_height / 2;
for (int y : {-edge_y, edge_y}) {
for (int x : {-edge_x, edge_x}) {
const int x_transformed =
static_cast<int>(std::floor(cos_angle * x + sin_angle * y));
const int y_transformed =
static_cast<int>(std::floor(-sin_angle * x + cos_angle * y));
if (std::abs(x_transformed) > dst_width / 2)
dst_width = 2 * std::abs(x_transformed);
if (std::abs(y_transformed) > dst_height / 2)
dst_height = 2 * std::abs(y_transformed);
}
}
}
}
void Rotate90(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
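  // The source pixel at (row y, column x) is written to destination
  // (row x, column output_width - 1 - y).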
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
float* dst_ptr_row = dst_data_prt + x * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr =
dst_ptr_row + (output_width - y - 1) * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
void Rotate180(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int dst_pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
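  // Each source row is copied into the mirrored destination row with its
  // pixels reversed.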
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
float* dst_ptr_row = dst_data_prt +
(output_height - y - 1) * dst_row_stride +
(output_width - 1) * dst_pixel_stride;
for (int x = 0; x < input_width; ++x) {
for (int c = 0; c < depth; ++c) {
dst_ptr_row[c] = src_ptr_row[c];
}
dst_ptr_row -= depth;
src_ptr_row += depth;
}
}
}
}
void Rotate270(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
float* dst_ptr_row =
dst_data_prt + (output_height - x - 1) * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr = dst_ptr_row + y * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
void RotateGeneric(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, int angle,
const float* input_data, float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
memset(
output_data, 0,
batches * output_width * output_height * depth * sizeof(output_data[0]));
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
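  // Inverse-map every destination pixel back into the source image;
  // destination pixels whose source falls outside the input keep the zero fill
  // from the memset above.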
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = -output_height / 2; y < output_height / 2; ++y) {
for (int x = -output_width / 2; x < output_width / 2; ++x) {
const float x_transformed = cos_angle * x + sin_angle * y;
const float y_transformed = -sin_angle * x + cos_angle * y;
const int x_transformed_integer =
static_cast<int>(std::floor(x_transformed));
const int y_transformed_integer =
static_cast<int>(std::floor(y_transformed));
const int x_src_integer = x_transformed_integer + input_width / 2;
const int y_src_integer = y_transformed_integer + input_height / 2;
const int x0 = x_src_integer;
const int x1 = x_src_integer + 1;
const int y0 = y_src_integer;
const int y1 = y_src_integer + 1;
if (x0 < 0 || x0 >= input_width) continue;
if (x1 < 0 || x1 >= input_width) continue;
if (y0 < 0 || y0 >= input_height) continue;
if (y1 < 0 || y1 >= input_height) continue;
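        // Bilinearly interpolate between the four neighboring source pixels.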
const float x_dist = x_transformed - x_transformed_integer;
const float y_dist = y_transformed - y_transformed_integer;
const float one_minus_x_dist = 1 - x_dist;
const float one_minus_y_dist = 1 - y_dist;
const float* src_ptr_row0 = src_data_prt + y0 * src_row_stride;
const float* src_ptr_row1 = src_data_prt + y1 * src_row_stride;
float* dst_row_ptr =
dst_data_prt + (y + output_height / 2) * dst_row_stride;
const float* src_ptr_pixel00 = src_ptr_row0 + x0 * pixel_stride;
const float* src_ptr_pixel10 = src_ptr_row0 + x1 * pixel_stride;
const float* src_ptr_pixel01 = src_ptr_row1 + x0 * pixel_stride;
const float* src_ptr_pixel11 = src_ptr_row1 + x1 * pixel_stride;
float* dst_pixel_ptr =
dst_row_ptr + (x + output_width / 2) * pixel_stride;
for (int c = 0; c < depth; ++c) {
const float v00 = *src_ptr_pixel00++;
const float v01 = *src_ptr_pixel01++;
const float v10 = *src_ptr_pixel10++;
const float v11 = *src_ptr_pixel11++;
*dst_pixel_ptr++ =
(v10 * one_minus_y_dist + v11 * y_dist) * x_dist +
(v00 * one_minus_y_dist + v01 * y_dist) * one_minus_x_dist;
}
}
}
}
}
void ComputeRotate(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 2);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
const DataRef* angle = inputs[1];
const int angle_data = *reinterpret_cast<const int*>(angle->Data());
MutableDataRef* output = outputs[0];
dim_t new_width = 0;
dim_t new_height = 0;
ComputeNewSize(img_width, img_height, angle_data, new_width, new_height);
output->Resize({img_num_batches, new_height, new_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
if (angle_data == 90) {
Rotate90(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 180) {
Rotate180(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 270) {
Rotate270(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
RotateGeneric(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, angle_data, img_data, output_data);
}
}
const Algo* Impl_Rotate() {
static const Algo rotate = {&ComputeRotate, nullptr};
return &rotate;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rotate.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rotate {
namespace {
struct RotateTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const int angle;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RotateTest : public ::testing::TestWithParam<RotateTestParams> {};
TEST_P(RotateTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const RotateTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef angle(etype_t::i32);
angle.Resize({1});
ASSERT_EQ(angle.Bytes(), sizeof(int));
std::memcpy(angle.Data(), ¶ms.angle, angle.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rotate = Impl_Rotate();
rotate->process({&img, &angle}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RotateTests, RotateTest,
testing::ValuesIn({
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
90,
{31, 21, 11,
32, 22, 12,
33, 23, 13},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
180,
{33, 32, 31,
23, 22, 21,
13, 12, 11},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
270,
{13, 23, 33,
12, 22, 32,
11, 21, 31},
{1, 3, 3, 1}},
RotateTestParams{{1, 8, 8, 1},
{1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1},
-45,
{
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.78f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.23f, 0.97f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
},
{1, 12, 12, 1}},
}));
}
}
} |
780 | cpp | tensorflow/tensorflow | crop | tensorflow/lite/experimental/ml_adjacent/algo/crop.cc | tensorflow/lite/experimental/ml_adjacent/algo/crop_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_CROP_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_CROP_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace crop {
const algo::Algo* Impl_CenterCrop();
const algo::Algo* Impl_CropToBoundingBox();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace crop {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
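// Copies the selected window row by row; `chunk` is the byte width of a
// single pixel (element size * channels), so each output row is one
// contiguous memcpy.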
inline void CropToBoundingBox(dim_t offset_height, dim_t offset_width,
dim_t out_height, dim_t out_width,
const DataRef* input, MutableDataRef* output) {
const dim_t in_height = input->Dims()[1];
const dim_t in_width = input->Dims()[2];
const dim_t num_channels = input->Dims()[3];
const dim_t chunk = TypeWidth(input->Type()) * num_channels;
const dim_t in_img_size = in_height * in_width;
const dim_t out_img_size = out_height * out_width;
for (int b = 0; b < input->Dims()[0]; ++b) {
for (int i = 0; i < out_height; ++i) {
const dim_t read_byte_ofs =
(in_img_size * b + (i + offset_height) * in_width + offset_width) *
chunk;
const void* read_start_addr =
reinterpret_cast<const char*>(input->Data()) + read_byte_ofs;
const dim_t write_byte_ofs = chunk * (out_img_size * b + i * out_width);
void* write_addr =
reinterpret_cast<char*>(output->Data()) + write_byte_ofs;
std::memcpy(write_addr, read_start_addr, chunk * out_width);
}
}
}
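// Center crop: keeps roughly `frac` of each spatial dimension by trimming
// equal integer offsets from both sides; the output extent is recomputed
// from the offsets so the crop stays symmetric.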
void ComputeCenterCrop(const InputPack& inputs, const OutputPack& outputs) {
#ifndef NDEBUG
TFLITE_CHECK(inputs.size() == 2);
TFLITE_CHECK(outputs.size() == 1);
#endif
const DataRef* img = inputs[0];
const DataRef* frac = inputs[1];
const double frac_data = *reinterpret_cast<const double*>(frac->Data());
const dim_t in_height = img->Dims()[1];
const dim_t out_height_offset = (in_height - in_height * frac_data) / 2;
const dim_t out_height = in_height - (2 * out_height_offset);
const dim_t in_width = img->Dims()[2];
const dim_t out_width_offset = (in_width - in_width * frac_data) / 2;
const dim_t out_width = in_width - (2 * out_width_offset);
MutableDataRef* output = outputs[0];
output->Resize({img->Dims()[0], out_height, out_width, img->Dims()[3]});
CropToBoundingBox(out_height_offset, out_width_offset, out_height, out_width,
img, output);
}
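// Bounding-box crop: offsets (inputs 1-2) and target extents (inputs 3-4)
// arrive as scalar tensors; the output is resized to the requested window
// before the row-wise copy.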
void ComputeCropToBoundingBox(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 5);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const DataRef* offset_height = inputs[1];
const dim_t offset_height_data =
*reinterpret_cast<const dim_t*>(offset_height->Data());
const DataRef* offset_width = inputs[2];
const dim_t offset_width_data =
*reinterpret_cast<const dim_t*>(offset_width->Data());
const DataRef* target_height = inputs[3];
const dim_t target_height_data =
*reinterpret_cast<const dim_t*>(target_height->Data());
const DataRef* target_width = inputs[4];
const dim_t target_width_data =
*reinterpret_cast<const dim_t*>(target_width->Data());
MutableDataRef* output = outputs[0];
output->Resize(
{img->Dims()[0], target_height_data, target_width_data, img->Dims()[3]});
CropToBoundingBox(offset_height_data, offset_width_data, target_height_data,
target_width_data, img, output);
}
}
const Algo* Impl_CenterCrop() {
static const Algo center_crop = {&ComputeCenterCrop, nullptr};
return ¢er_crop;
}
const Algo* Impl_CropToBoundingBox() {
static const Algo crop_to_bounding_box = {&ComputeCropToBoundingBox, nullptr};
return &crop_to_bounding_box;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
using ::testing::ElementsAreArray;
namespace ml_adj {
namespace crop {
namespace {
std::vector<float> GetIotaVec(dim_t d1, dim_t d2, dim_t d3, dim_t d4) {
std::vector<float> res;
res.resize(d1 * d2 * d3 * d4);
std::iota(res.begin(), res.end(), 0);
return res;
}
struct CropCenterTestParams {
std::vector<dim_t> img_dims;
std::vector<float> img_data;
double frac;
std::vector<float> expected_data;
std::vector<dim_t> expected_shape;
};
class CropCenterTest : public testing::TestWithParam<CropCenterTestParams> {};
TEST_P(CropCenterTest, FloatPixelType) {
const CropCenterTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef frac(etype_t::f64);
frac.Resize({1});
ASSERT_EQ(frac.Bytes(), sizeof(double));
std::memcpy(frac.Data(), ¶ms.frac, frac.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* center_crop = Impl_CenterCrop();
center_crop->process({&img, &frac}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropCenterTest,
testing::ValuesIn({
CropCenterTestParams{{1, 4, 4, 1},
GetIotaVec(1, 4, 4, 1),
0.5,
{5, 6, 9, 10},
{1, 2, 2, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.5,
{6, 7, 8, 11, 12, 13, 16, 17, 18},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0.5,
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.9,
GetIotaVec(1, 5, 5, 1),
{1, 5, 5, 1}},
CropCenterTestParams{
{1, 5, 5, 1}, GetIotaVec(1, 5, 5, 1), 0.2, {12}, {1, 1, 1, 1}},
CropCenterTestParams{{1, 2, 2, 2},
GetIotaVec(1, 2, 2, 2),
.7,
{0, 1, 2, 3, 4, 5, 6, 7},
{1, 2, 2, 2}},
CropCenterTestParams{
{1, 3, 3, 2}, GetIotaVec(1, 3, 3, 2), .1, {8, 9}, {1, 1, 1, 2}},
CropCenterTestParams{
{2, 3, 3, 1}, GetIotaVec(2, 3, 3, 1), .1, {4, 13}, {2, 1, 1, 1}},
CropCenterTestParams{{2, 3, 3, 2},
GetIotaVec(2, 3, 3, 2),
.1,
{8, 9, 26, 27},
{2, 1, 1, 2}},
}));
struct CropToBoundingBoxTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
dim_t offset_height;
dim_t offset_width;
dim_t target_height;
dim_t target_width;
const std::vector<dim_t> expected_shape;
const std::vector<float> expected_data;
};
class CropToBoundingBoxTest
: public testing::TestWithParam<CropToBoundingBoxTestParams> {};
TEST_P(CropToBoundingBoxTest, FloatPixelType) {
const CropToBoundingBoxTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef offset_height(etype_t::i32);
offset_height.Resize({1});
ASSERT_EQ(offset_height.Bytes(), sizeof(int));
std::memcpy(offset_height.Data(), ¶ms.offset_height,
offset_height.Bytes());
OwningVectorRef offset_width(etype_t::i32);
offset_width.Resize({1});
ASSERT_EQ(offset_width.Bytes(), sizeof(int));
std::memcpy(offset_width.Data(), ¶ms.offset_width, offset_width.Bytes());
OwningVectorRef target_height(etype_t::i32);
target_height.Resize({1});
ASSERT_EQ(target_height.Bytes(), sizeof(int));
std::memcpy(target_height.Data(), ¶ms.target_height,
target_height.Bytes());
OwningVectorRef target_width(etype_t::i32);
target_width.Resize({1});
ASSERT_EQ(target_width.Bytes(), sizeof(int));
std::memcpy(target_width.Data(), ¶ms.target_width, target_width.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* crop_to_bounding_box = Impl_CropToBoundingBox();
crop_to_bounding_box->process(
{&img, &offset_height, &offset_width, &target_height, &target_width},
{&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropToBoundingBoxTest,
testing::ValuesIn({
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
0,
2,
2,
{1, 2, 2, 1},
{0, 1,
5, 6}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
3,
3,
2,
2,
{1, 2, 2, 1},
{18, 19,
23, 24}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
3,
2,
2,
{1, 2, 2, 1},
{3, 4,
8, 9}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
2,
1,
3,
3,
{1, 3, 3, 1},
{11, 12, 13,
16, 17, 18,
21, 22, 23}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0,
0,
3,
3,
{1, 3, 3, 1},
{0, 1, 2,
3, 4, 5,
6, 7, 8}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
1,
1,
1,
1,
{1, 1, 1, 1},
{4}},
CropToBoundingBoxTestParams{{1, 5, 5, 3},
GetIotaVec(1, 5, 5, 3),
2,
2,
2,
2,
{1, 2, 2, 3},
{36, 37, 38, 39, 40, 41,
51, 52, 53, 54, 55, 56}},
CropToBoundingBoxTestParams{{2, 5, 5, 2},
GetIotaVec(2, 5, 5, 2),
2,
2,
2,
2,
{2, 2, 2, 2},
{24, 25, 26, 27, 34, 35, 36, 37,
74, 75, 76, 77, 84, 85, 86, 87}},
}));
}
}
} |
781 | cpp | tensorflow/tensorflow | per_image_standardization | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.cc | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_PER_IMAGE_STANDARDIZATION_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_PER_IMAGE_STANDARDIZATION_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace per_image_standardization {
const algo::Algo* Impl_PerImageStandardization();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cmath>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace per_image_standardization {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
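// Standardizes each image independently: subtract the per-image mean, then
// scale by a reciprocal standard deviation that is capped so near-constant
// images do not divide by a vanishing variance.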
inline void PerImageStandardization(dim_t batches, dim_t height, dim_t width,
dim_t num_channels, const float* input_data,
float* output_data) {
const dim_t num_pixels_per_image = height * width * num_channels;
const float inv_num_pixels_per_image = 1.0f / num_pixels_per_image;
for (ind_t b = 0; b < batches; ++b) {
const dim_t offset = b * num_pixels_per_image;
const float* input_ptr = input_data + offset;
float* output_ptr = output_data + offset;
float mean = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
mean += input_ptr[i];
}
mean *= inv_num_pixels_per_image;
float variance = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
const float diff = input_ptr[i] - mean;
variance += diff * diff * inv_num_pixels_per_image;
output_ptr[i] = diff;
}
const float inv_adjusted_stddev =
fmin(num_pixels_per_image, 1.0f / sqrt(variance));
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
output_ptr[i] *= inv_adjusted_stddev;
}
}
}
void ComputePerImageStandardization(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
PerImageStandardization(img_num_batches, img_height, img_width,
img_num_channels, img_data, output_data);
}
}
const Algo* Impl_PerImageStandardization() {
static const Algo per_image_standardization = {
&ComputePerImageStandardization, nullptr};
return &per_image_standardization;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace per_image_standardization {
namespace {
struct PerImageStandardizationTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
};
class PerImageStandardizationTest
: public testing::TestWithParam<PerImageStandardizationTestParams> {};
TEST_P(PerImageStandardizationTest, FloatPixelType) {
const PerImageStandardizationTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* per_image_standardization = Impl_PerImageStandardization();
per_image_standardization->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), img.Dims());
constexpr float kAbsError = 0.01f;
float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
PerImageStandardizationTests, PerImageStandardizationTest,
testing::ValuesIn({
PerImageStandardizationTestParams{
{1, 2, 2, 1},
{1, 2,
3, 4},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407}},
PerImageStandardizationTestParams{
{2, 2, 2, 1},
{1, 2,
3, 4,
1, 2,
4, 8},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407,
-1.0257553, -0.65275335,
0.09325048, 1.5852581}},
PerImageStandardizationTestParams{
{2, 2, 2, 2},
{1, 2,
1, 3,
1, 4,
1, 5,
1, 2,
2, 2,
3, 2,
4, 2},
{-0.8451542, -0.16903085,
-0.8451542, 0.50709254,
-0.8451542, 1.1832159,
-0.8451542, 1.8593392,
-1.5075567, -0.30151135,
-0.30151135, -0.30151135,
0.904534, -0.30151135,
2.1105793, -0.30151135}},
}));
}
}
} |
782 | cpp | tensorflow/tensorflow | flip_left_right | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.cc | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_FLIP_LEFT_RIGHT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_FLIP_LEFT_RIGHT_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace flip_left_right {
const algo::Algo* Impl_FlipLeftRight();
}
}
#endif
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace flip_left_right {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
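// Horizontal mirror: for each row the source pointer starts at the last
// pixel and walks left while the destination walks right, copying one
// pixel (`chunk_size` bytes) per step.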
void FlipLeftRight(dim_t batches, dim_t input_height, dim_t input_width,
const char* input_data, char* output_data,
dim_t chunk_size) {
const dim_t row_stride = input_width * chunk_size;
const dim_t batch_stride = row_stride * input_height;
for (int b = 0; b < batches; ++b) {
    const char* src_data_ptr = input_data + b * batch_stride;
    char* dst_data_ptr = output_data + b * batch_stride;
    for (int y = 0; y < input_height; ++y) {
      const char* src_ptr_row =
          src_data_ptr + y * row_stride + (input_width - 1) * chunk_size;
      char* dst_ptr_row = dst_data_ptr + y * row_stride;
for (int x = 0; x < input_width; ++x) {
std::memcpy(dst_ptr_row, src_ptr_row, chunk_size);
src_ptr_row -= chunk_size;
dst_ptr_row += chunk_size;
}
}
}
}
void ComputeFlipLeftRight(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const char* img_data = reinterpret_cast<const char*>(img->Data());
const dim_t num_batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t num_channels = img->Dims()[3];
const dim_t chunk_size = TypeWidth(img->Type()) * num_channels;
MutableDataRef* output = outputs[0];
output->Resize({num_batches, height, width, num_channels});
char* output_data = reinterpret_cast<char*>(output->Data());
FlipLeftRight(num_batches, height, width, img_data, output_data, chunk_size);
}
}
const Algo* Impl_FlipLeftRight() {
static const Algo flip_left_right = {&ComputeFlipLeftRight, nullptr};
return &flip_left_right;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace flip_left_right {
namespace {
struct FlipLeftRightTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class FlipLeftRightTest
: public ::testing::TestWithParam<FlipLeftRightTestParams> {};
TEST_P(FlipLeftRightTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const FlipLeftRightTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* flip_left_right = Impl_FlipLeftRight();
flip_left_right->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
FlipLeftRightTests, FlipLeftRightTest,
testing::ValuesIn({
FlipLeftRightTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
{13, 12, 11,
23, 22, 21,
33, 32, 31},
{1, 3, 3, 1}},
FlipLeftRightTestParams{{1, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{1, 3, 3, 2}},
FlipLeftRightTestParams{{2, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6,
13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4,
11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{2, 3, 3, 2}},
}));
}
}
} |
783 | cpp | tensorflow/tensorflow | yuv_to_rgb | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.cc | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_YUV_TO_RGB_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_YUV_TO_RGB_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace yuv_to_rgb {
const algo::Algo* Impl_YuvToRgb();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
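// Row-major 3x3 YUV -> RGB conversion matrix; ConvertColorSpace applies it
// to each pixel's 3-channel vector.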
constexpr float kYuv2RgbKernel[] = {1.0f, 0.0f,          1.13988303f,
                                    1.0f, -0.394642334f, -0.58062185f,
                                    1.0f, 2.03206185f,   0.0f};
constexpr int kYuv2RgbKernelDim =
sizeof(kYuv2RgbKernel) / sizeof(kYuv2RgbKernel[0]);
void ComputeYuvToRgb(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kYuv2RgbKernel[0], kYuv2RgbKernelDim);
}
}
const Algo* Impl_YuvToRgb() {
static const Algo yuv_to_rgb = {&ComputeYuvToRgb, nullptr};
return &yuv_to_rgb;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
struct YuvToRgbTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class YuvToRgbTest : public ::testing::TestWithParam<YuvToRgbTestParams> {};
TEST_P(YuvToRgbTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const YuvToRgbTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* yuv_to_rgb = Impl_YuvToRgb();
yuv_to_rgb->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
YuvToRgbTests, YuvToRgbTest,
testing::ValuesIn({
YuvToRgbTestParams{{1, 3, 2, 3},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{1, 3, 2, 3}},
YuvToRgbTestParams{{2, 3, 2, 3},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{2, 3, 2, 3}},
}));
}
}
} |
784 | cpp | tensorflow/tensorflow | resize | tensorflow/lite/experimental/ml_adjacent/algo/resize.cc | tensorflow/lite/experimental/ml_adjacent/algo/resize_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESIZE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESIZE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewResizeNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Resize : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const Resize2DAttributes&>(ctx.op_attr);
if (ctx.input_shapes[0][2] > ctx.output_shapes[0][2] ||
ctx.input_shapes[0][1] > ctx.output_shapes[0][1]) {
return absl::UnimplementedError(
"Downsampling is currently not supported by the resize op on GPU.");
}
if (ctx.output_shapes[0][2] != attr.new_shape.w ||
ctx.output_shapes[0][1] != attr.new_shape.h) {
return absl::InvalidArgumentError(
"Output size does not match new_size in attributes.");
}
if (ctx.input_shapes[0][3] != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError("Input/output channels mismatch.");
}
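    // With a 1x1 spatial input every output pixel equals the single source
    // pixel, so the generated shader reduces to a plain broadcast read.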
if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1) {
      *generated_code = {
          /*parameters=*/{},
          /*objects=*/{},
          /*shared_variables=*/{},
          /*workload=*/uint3(),
          /*workgroup=*/uint3(),
          /*source_code=*/"value_0 = $input_data_0[0, 0, gid.z]$;",
          /*input=*/IOStructure::ONLY_DEFINITIONS,
          /*output=*/IOStructure::AUTO,
      };
return absl::OkStatus();
}
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"scale_factor",
float2(CalculateResizeScale(ctx.input_shapes[0][2],
ctx.output_shapes[0][2], attr),
CalculateResizeScale(ctx.input_shapes[0][1],
ctx.output_shapes[0][1], attr))},
};
std::string source;
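    // Bilinear sampling: back-project the output coordinate (optionally with
    // half-pixel centers), clamp the 2x2 neighborhood to the image border,
    // and mix the four texels by the fractional offsets.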
if (attr.type == SamplingType::BILINEAR) {
if (attr.half_pixel_centers) {
source = "vec2 coord = (vec2(gid.xy) + 0.5) * $scale_factor$ - 0.5;";
} else {
source = "vec2 coord = vec2(gid.xy) * $scale_factor$;";
}
source += R"(
vec2 coord_floor = floor(coord);
ivec2 icoord_floor = ivec2(coord_floor);
ivec2 borders = ivec2($input_data_0_w$, $input_data_0_h$) - ivec2(1, 1);
ivec4 st;
st.xy = max(icoord_floor, ivec2(0, 0));
st.zw = min(icoord_floor + ivec2(1, 1), borders);
vec2 t = coord - coord_floor;
vec4 tex11 = $input_data_0[st.x, st.y, gid.z]$;
vec4 tex21 = $input_data_0[st.z, st.y, gid.z]$;
vec4 tex12 = $input_data_0[st.x, st.w, gid.z]$;
vec4 tex22 = $input_data_0[st.z, st.w, gid.z]$;
value_0 = mix(mix(tex11, tex21, t.x), mix(tex12, tex22, t.x), t.y);)";
} else if (attr.type == SamplingType::NEAREST) {
std::string fxc;
std::string fyc;
if (attr.half_pixel_centers) {
fxc = "(float(gid.x) + 0.5) * $scale_factor.x$";
fyc = "(float(gid.y) + 0.5) * $scale_factor.y$";
} else {
fxc = "float(gid.x) * $scale_factor.x$";
fyc = "float(gid.y) * $scale_factor.y$";
}
if (attr.align_corners) {
fxc += " + 0.5";
fyc += " + 0.5";
}
source += " ivec2 coord;\n";
source += " coord.x = int(" + fxc + ");\n";
source += " coord.y = int(" + fyc + ");\n";
source += " coord.x = max(0, coord.x);\n";
source += " coord.y = max(0, coord.y);\n";
source += " coord.x = min(coord.x, $input_data_0_w$ - 1);\n";
source += " coord.y = min(coord.y, $input_data_0_h$ - 1);\n";
source += R"(
value_0 = $input_data_0[coord.x, coord.y, gid.z]$;
)";
} else {
return absl::InvalidArgumentError("Unknown sampling type");
}
    *generated_code = {
        /*parameters=*/std::move(parameters),
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(source),
        /*input=*/IOStructure::ONLY_DEFINITIONS,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewResizeNodeShader() {
return std::make_unique<Resize>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(ResizeTest, Bilinear1x1x2To2x2x2) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 1, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 2, 2, 2);
Resize2DAttributes attr;
attr.align_corners = true;
attr.new_shape = HW(2, 2);
attr.type = SamplingType::BILINEAR;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0}));
}
TEST(ResizeTest, Bilinear1x2x1To1x4x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 1, 4, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.new_shape = HW(1, 4);
attr.type = SamplingType::BILINEAR;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0}));
}
TEST(ResizeTest, Bilinear2x2x1To4x4x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 4, 4, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.new_shape = HW(4, 4);
attr.type = SamplingType::BILINEAR;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0, 6.0, 8.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0, 3.5, 4.75, 6.0, 6.0, 6.0,
7.0, 8.0, 8.0, 6.0, 7.0, 8.0, 8.0}));
}
TEST(ResizeTest, Bilinear2x2x1To3x3x1WithoutHalfPixel) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 3, 3, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.half_pixel_centers = false;
attr.new_shape = HW(3, 3);
attr.type = SamplingType::BILINEAR;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 1.666666, 2.0, 2.333333, 3.0,
3.333333, 3.0, 3.666666, 4.0}));
}
TEST(ResizeTest, Bilinear2x2x1To3x3x1WithHalfPixel) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 3, 3, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.half_pixel_centers = true;
attr.new_shape = HW(3, 3);
attr.type = SamplingType::BILINEAR;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{1.0, 1.5, 2.0, 2.0, 2.5, 3.0, 3.0, 3.5, 4.0}));
}
TEST(ResizeTest, Nearest1x2x1To2x4x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 4, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.new_shape = HW(2, 4);
attr.type = SamplingType::NEAREST;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0}));
}
TEST(ResizeTest, NearestAlignCorners) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 3, 3, 1);
Resize2DAttributes attr;
attr.align_corners = true;
attr.half_pixel_centers = false;
attr.new_shape = HW(3, 3);
attr.type = SamplingType::NEAREST;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {3.0f, 6.0f, 9.0f, 12.0f}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {3.0f, 6.0f, 6.0f, 9.0f, 12.0f, 12.0f,
9.0f, 12.0f, 12.0f}));
}
TEST(ResizeTest, NearestHalfPixelCenters) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 3, 3, 1);
Resize2DAttributes attr;
attr.align_corners = false;
attr.half_pixel_centers = true;
attr.new_shape = HW(3, 3);
attr.type = SamplingType::NEAREST;
SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {3.0f, 6.0f, 9.0f, 12.0f}));
ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {3.0f, 6.0f, 6.0f, 9.0f, 12.0f, 12.0f,
9.0f, 12.0f, 12.0f}));
}
}
}
}
} |
785 | cpp | tensorflow/tensorflow | rgb_to_yuv | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_RGB_TO_YUV_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_RGB_TO_YUV_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace rgb_to_yuv {
const algo::Algo* Impl_RgbToYuv();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
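// Row-major 3x3 RGB -> YUV conversion matrix (BT.601 luma weights in the
// first row), applied per pixel by ConvertColorSpace.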
constexpr float kRgb2YuvKernel[] = {0.299f, 0.587f, 0.114f,
-0.14714119f, -0.28886916f, 0.43601035f,
0.61497538f, -0.51496512f, -0.10001026f};
constexpr int kRgb2YuvKernelSize =
sizeof(kRgb2YuvKernel) / sizeof(kRgb2YuvKernel[0]);
void ComputeRgbToYuv(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kRgb2YuvKernel[0], kRgb2YuvKernelSize);
}
}
const Algo* Impl_RgbToYuv() {
static const Algo rgb_to_yuv = {&ComputeRgbToYuv, nullptr};
return &rgb_to_yuv;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
struct Rgb2YuvTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class Rgb2YuvTest : public ::testing::TestWithParam<Rgb2YuvTestParams> {};
TEST_P(Rgb2YuvTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const Rgb2YuvTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_yuv = Impl_RgbToYuv();
rgb_to_yuv->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
Rgb2YuvTests, Rgb2YuvTest,
testing::ValuesIn({
Rgb2YuvTestParams{{1, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{1, 3, 2, 3}},
Rgb2YuvTestParams{{2, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{2, 3, 2, 3}},
}));
}
}
} |
786 | cpp | tensorflow/tensorflow | tfl_tensor_ref | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.cc | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_TFLITE_TFL_TENSOR_REF_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_TFLITE_TFL_TENSOR_REF_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj::data {
class TflTensorRef : public DataRef {
public:
explicit TflTensorRef(const TfLiteTensor* tfl_tensor);
TflTensorRef(const TflTensorRef&) = delete;
TflTensorRef(TflTensorRef&&) = delete;
TflTensorRef& operator=(const TflTensorRef&) = delete;
TflTensorRef& operator=(TflTensorRef&&) = delete;
const void* Data() const override;
ind_t NumElements() const override;
size_t Bytes() const override;
private:
const TfLiteTensor* const tfl_tensor_;
};
class MutableTflTensorRef : public MutableDataRef {
public:
MutableTflTensorRef(TfLiteTensor* tfl_tensor, TfLiteContext* tfl_ctx);
MutableTflTensorRef(const MutableTflTensorRef&) = delete;
MutableTflTensorRef(MutableTflTensorRef&&) = delete;
MutableTflTensorRef& operator=(const MutableTflTensorRef&) = delete;
MutableTflTensorRef& operator=(MutableTflTensorRef&&) = delete;
const void* Data() const override;
ind_t NumElements() const override;
size_t Bytes() const override;
void Resize(dims_t&& dims) override;
void* Data() override;
private:
TfLiteTensor* const tfl_tensor_;
TfLiteContext* tfl_ctx_;
};
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <cstddef>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace ml_adj {
namespace data {
using ::tflite::BuildTfLiteArray;
using ::tflite::TfLiteArrayUniquePtr;
using ::tflite::TfLiteTypeGetSize;
namespace {
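// Maps the TfLite tensor type onto the library's element type; unhandled
// types fall back to i32.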
etype_t TflToLibType(const TfLiteType tfl_type) {
switch (tfl_type) {
case kTfLiteFloat32:
return etype_t::f32;
case kTfLiteInt32:
return etype_t::i32;
case kTfLiteFloat64:
return etype_t::f64;
default:
return etype_t::i32;
}
}
}
TflTensorRef::TflTensorRef(const TfLiteTensor* tfl_tensor)
: DataRef(TflToLibType(tfl_tensor->type)), tfl_tensor_(tfl_tensor) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
const void* TflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t TflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t TflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
MutableTflTensorRef::MutableTflTensorRef(TfLiteTensor* tfl_tensor,
TfLiteContext* tfl_ctx)
: MutableDataRef(TflToLibType(tfl_tensor->type)),
tfl_tensor_(tfl_tensor),
tfl_ctx_(tfl_ctx) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
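// Delegates the reallocation to TfLiteContext::ResizeTensor (so the runtime
// owns the new buffer) and then mirrors the new shape in dims_.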
void MutableTflTensorRef::Resize(dims_t&& dims) {
TfLiteArrayUniquePtr<int> arr =
BuildTfLiteArray(std::vector<int>(dims.begin(), dims.end()));
TFLITE_CHECK_EQ(tfl_ctx_->ResizeTensor(tfl_ctx_, tfl_tensor_, arr.release()),
kTfLiteOk);
dims_ = dims;
}
const void* MutableTflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t MutableTflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t MutableTflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
void* MutableTflTensorRef::Data() { return tfl_tensor_->data.data; }
}
} | #include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::Each;
using ::tflite::BuildTfLiteTensor;
using ::tflite::DimsAre;
using ::tflite::NumElements;
using ::tflite::TensorUniquePtr;
TfLiteStatus SimpleResizeTensor(TfLiteContext*, TfLiteTensor* tensor,
TfLiteIntArray* new_size) {
TFLITE_CHECK(tensor->type == kTfLiteFloat32);
size_t num_bytes = NumElements(new_size) * sizeof(float);
TF_LITE_ENSURE_STATUS(TfLiteTensorRealloc(num_bytes, tensor));
if (tensor->dims != nullptr) {
TfLiteIntArrayFree(tensor->dims);
}
tensor->dims = new_size;
return kTfLiteOk;
}
std::unique_ptr<TfLiteContext> MakeSimpleContext() {
auto ctx = std::make_unique<TfLiteContext>();
ctx->ResizeTensor = SimpleResizeTensor;
return ctx;
}
TEST(ImmutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
TflTensorRef ref(tfl_tensor.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, TensorRefWritesDataToTensor) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {3, 3}, kTfLiteDynamic);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> data(reinterpret_cast<float*>(ref.Data()), 9);
std::fill(data.begin(), data.end(), 3.0f);
EXPECT_THAT(absl::Span<const float>(tfl_tensor->data.f, 9), Each(3.0f));
}
TEST(MutableTensorRefTest, ResizeIncreaseSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({3, 3});
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 9);
ASSERT_THAT(tfl_tensor.get(), DimsAre({3, 3}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
TEST(MutableTensorRefTest, ResizeDecreasesSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({2, 2});
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 4);
ASSERT_THAT(tfl_tensor.get(), DimsAre({2, 2}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
}
}
} |
787 | cpp | tensorflow/tensorflow | extern_call | tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.cc | tensorflow/lite/experimental/ml_adjacent/tflite/extern_call_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_TFLITE_EXTERN_CALL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_TFLITE_EXTERN_CALL_H_
#include "tensorflow/lite/core/c/common.h"
namespace tflite::extern_call {
struct ExternCallOptions {
uint8_t func_id;
};
TfLiteRegistration* Register_EXTERN_CALL();
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include "tensorflow/lite/experimental/ml_adjacent/algo/resize.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace extern_call {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::MutableTflTensorRef;
using ::ml_adj::data::TflTensorRef;
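// The input/output packs own their tensor refs through raw pointers; this
// deleter frees each element and then the pack itself.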
template <typename PackType>
struct PackDeleter {
void operator()(PackType* pack) {
if (pack == nullptr) return;
for (auto* d : *pack) {
if (d == nullptr) continue;
delete d;
}
delete pack;
}
};
template <typename PackType>
using UniquePack = std::unique_ptr<PackType, PackDeleter<PackType>>;
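// Function registry indexed by ExternCallOptions::func_id carried in the
// op's custom options: 0 -> center crop, 1 -> resize.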
constexpr uint8_t kNumFuncs = 2;
static const Algo* const kReg[kNumFuncs] = {ml_adj::crop::Impl_CenterCrop(),
ml_adj::resize::Impl_Resize()};
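// Output shapes depend on runtime input values, so every output is marked
// dynamic here and resized inside Eval by the selected algorithm.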
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
UniquePack<InputPack> lib_inputs(new InputPack());
for (int i = 0; i < NumInputs(node); ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
lib_inputs->push_back(new TflTensorRef(input));
}
UniquePack<OutputPack> lib_outputs(new OutputPack());
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
lib_outputs->push_back(new MutableTflTensorRef(output, context));
}
TF_LITE_ENSURE_EQ(context, node->custom_initial_data_size,
sizeof(ExternCallOptions));
const auto* const options =
reinterpret_cast<const ExternCallOptions*>(node->custom_initial_data);
TF_LITE_ENSURE(context,
options->func_id >= 0 && options->func_id < kNumFuncs);
const Algo* const algo = kReg[options->func_id];
algo->process(*lib_inputs, *lib_outputs);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXTERN_CALL() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
class ExternCallModel : public SingleOpModel {
public:
ExternCallModel(std::vector<TensorData> inputs,
std::vector<TensorData> outputs, uint8_t func_id) {
std::vector<std::vector<int>> input_shapes;
for (const auto& data : inputs) {
input_ids_.push_back(AddInput(data));
input_shapes.push_back(GetShape(input_ids_.back()));
}
for (const auto& data : outputs) {
output_ids_.push_back(AddOutput(data));
}
SetCustomOp("ExternCall", {func_id}, extern_call::Register_EXTERN_CALL);
BuildInterpreter(input_shapes);
}
const TfLiteTensor* Output(int output_id) {
return interpreter_->tensor(output_ids_[output_id]);
}
private:
std::vector<int> input_ids_;
std::vector<int> output_ids_;
};
namespace {
TEST(ExternCallTest, CropFunc) {
std::vector<TensorData> inputs = {{TensorType_FLOAT32, {1, 5, 5, 1}},
{TensorType_FLOAT64, {}}};
std::vector<TensorData> output = {{TensorType_FLOAT32, {}}};
ExternCallModel model(inputs, output, 0);
model.PopulateTensor<double>(1, {0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_NE(model.Output(0), nullptr);
ASSERT_THAT(model.Output(0), DimsAre({1, 3, 3, 1}));
}
TEST(ExternCallTest, ResizeTest) {
std::vector<TensorData> inputs = {{TensorType_FLOAT32, {1, 5, 5, 1}},
{TensorType_UINT32, {2}}};
std::vector<TensorData> output = {{TensorType_FLOAT32, {}}};
ExternCallModel model(inputs, output, 1);
model.PopulateTensor<uint32_t>(1, {3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_NE(model.Output(0), nullptr);
ASSERT_THAT(model.Output(0), DimsAre({1, 3, 3, 1}));
}
}
} |
788 | cpp | tensorflow/tensorflow | owning_vector_ref | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.cc | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_DATA_OWNING_VECTOR_REF_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_DATA_OWNING_VECTOR_REF_H_
#include <vector>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
class OwningVectorRef : public MutableDataRef {
public:
explicit OwningVectorRef(etype_t type) : MutableDataRef(type) {}
OwningVectorRef(const OwningVectorRef&) = delete;
OwningVectorRef(OwningVectorRef&&) = delete;
OwningVectorRef& operator=(const OwningVectorRef&) = delete;
OwningVectorRef& operator=(OwningVectorRef&&) = delete;
void Resize(dims_t&& dims) override;
const void* Data() const override;
void* Data() override;
ind_t NumElements() const override;
size_t Bytes() const override;
~OwningVectorRef() override = default;
private:
std::vector<char> raw_data_buffer_;
ind_t num_elements_ = 0;
};
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <cstddef>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
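// The element count is the product of dims up to (but not including) the
// first non-positive entry; the byte buffer is resized to that count times
// the element width.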
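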
void OwningVectorRef::Resize(dims_t&& dims) {
dims_ = dims;
num_elements_ = 0;
for (dim_t d : dims_) {
if (d <= 0) {
break;
}
if (num_elements_ == 0) {
num_elements_ = d;
} else {
num_elements_ *= d;
}
}
raw_data_buffer_.resize(num_elements_ * TypeWidth(Type()));
}
const void* OwningVectorRef::Data() const { return raw_data_buffer_.data(); }
void* OwningVectorRef::Data() { return raw_data_buffer_.data(); }
ind_t OwningVectorRef::NumElements() const { return num_elements_; }
size_t OwningVectorRef::Bytes() const {
return NumElements() * TypeWidth(Type());
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
TEST(OwningVectorRefTest, ConstructFloat32) {
OwningVectorRef t(etype_t::f32);
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 0);
EXPECT_EQ(t.Bytes(), 0);
EXPECT_THAT(t.Dims(), IsEmpty());
}
TEST(OwningVectorRefTest, ResizeFromEmptyFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({2, 2}));
EXPECT_EQ(t.NumElements(), 4);
ASSERT_EQ(t.Bytes(), 4 * sizeof(float));
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, ResizeDownFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
t.Resize({3});
ASSERT_THAT(t.Dims(), ElementsAreArray<dim_t>({3}));
EXPECT_EQ(t.NumElements(), 3);
ASSERT_EQ(t.Bytes(), 3 * sizeof(float));
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, IgnoresDimsForNumElementsAfterFirstNonPositive) {
OwningVectorRef t(etype_t::f32);
t.Resize({3, 0, 0, 2});
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 3);
EXPECT_EQ(t.Bytes(), 3 * sizeof(float));
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({3, 0, 0, 2}));
}
}
}
} |
789 | cpp | tensorflow/tensorflow | generate_testspec | tensorflow/lite/testing/generate_testspec.cc | tensorflow/lite/testing/generate_testspec_test.cc | #ifndef TENSORFLOW_LITE_TESTING_GENERATE_TESTSPEC_H_
#define TENSORFLOW_LITE_TESTING_GENERATE_TESTSPEC_H_
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
bool GenerateTestSpecFromTensorflowModel(
std::iostream& stream, const string& tensorflow_model_path,
const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer);
bool GenerateTestSpecFromTFLiteModel(
std::iostream& stream, const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer);
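// GenerateRandomTensor fills a flat buffer whose size is the product of the
// dimensions in `shape` with values drawn from `random_func`.
// Example usage (illustrative sketch, mirroring the unit test):
//   std::mt19937 rng;
//   auto values = GenerateRandomTensor<float>(
//       {1, 3, 4},
//       [&] { return std::uniform_real_distribution<float>(-0.5, 0.5)(rng); });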
template <typename T, typename RandomFunction>
std::vector<T> GenerateRandomTensor(const std::vector<int>& shape,
RandomFunction random_func) {
int64_t num_elements = 1;
for (const int dim : shape) {
num_elements *= dim;
}
std::vector<T> result(num_elements);
std::generate_n(result.data(), num_elements, random_func);
return result;
}
}
}
#endif
#include "tensorflow/lite/testing/generate_testspec.h"
#include <iostream>
#include <random>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/testing/test_runner.h"
#include "tensorflow/lite/testing/tf_driver.h"
#include "tensorflow/lite/testing/tflite_driver.h"
namespace tflite {
namespace testing {
namespace {
template <typename T, typename RandomEngine, typename RandomDistribution>
void GenerateCsv(const string& name, const std::vector<int>& shape,
RandomEngine* engine, RandomDistribution distribution,
std::pair<string, string>* out) {
std::vector<T> data =
GenerateRandomTensor<T>(shape, [&]() { return distribution(*engine); });
*out = std::make_pair(name, Join(data.data(), data.size(), ","));
}
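// Builds one (name, csv_values) pair per input layer, drawing random data of
// the declared type and shape; clears and returns an empty vector if an
// unsupported type is encountered.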
template <typename RandomEngine>
std::vector<std::pair<string, string>> GenerateInputValues(
RandomEngine* engine, const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape) {
std::vector<std::pair<string, string>> input_values;
input_values.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
tensorflow::DataType type;
CHECK(DataTypeFromString(input_layer_type[i], &type));
auto shape = Split<int>(input_layer_shape[i], ",");
const auto& name = input_layer[i];
switch (type) {
case tensorflow::DT_FLOAT:
GenerateCsv<float>(name, shape, engine,
std::uniform_real_distribution<float>(-0.5, 0.5),
&input_values[i]);
break;
case tensorflow::DT_UINT8:
GenerateCsv<uint8_t>(name, shape, engine,
std::uniform_int_distribution<uint32_t>(0, 255),
&input_values[i]);
break;
case tensorflow::DT_INT32:
GenerateCsv<int32_t>(name, shape, engine,
std::uniform_int_distribution<int32_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_INT64:
GenerateCsv<int64_t>(name, shape, engine,
std::uniform_int_distribution<int64_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_BOOL:
GenerateCsv<int>(name, shape, engine,
std::uniform_int_distribution<int>(0, 1),
&input_values[i]);
break;
default:
fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n",
type, input_layer_type[i].c_str());
input_values.clear();
return input_values;
}
}
return input_values;
}
bool GenerateTestSpecFromRunner(std::iostream& stream, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer,
TestRunner* runner) {
auto input_size = input_layer.size();
if (input_layer_shape.size() != input_size ||
input_layer_type.size() != input_size) {
fprintf(stderr,
"Input size not match. Expected %lu, got %lu input types, %lu "
"input shapes.\n",
input_size, input_layer_type.size(), input_layer_shape.size());
return false;
}
stream << "reshape {\n";
for (int i = 0; i < input_size; i++) {
const auto& name = input_layer[i];
const auto& shape = input_layer_shape[i];
stream << " input { key: \"" << name << "\" value: \"" << shape
<< "\" }\n";
}
stream << "}\n";
std::mt19937 random_engine;
for (int i = 0; i < num_invocations; ++i) {
auto input_values = GenerateInputValues(
&random_engine, input_layer, input_layer_type, input_layer_shape);
if (input_values.empty()) {
std::cerr << "Unable to generate input values for the TensorFlow model. "
"Make sure the correct values are defined for "
"input_layer, input_layer_type, and input_layer_shape."
<< std::endl;
return false;
}
runner->Invoke(input_values);
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
stream << "invoke {\n";
for (const auto& entry : input_values) {
stream << " input { key: \"" << entry.first << "\" value: \""
<< entry.second << "\" }\n";
}
for (const auto& name : output_layer) {
stream << " output { key: \"" << name << "\" value: \""
<< runner->ReadOutput(name) << "\" }\n";
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
}
stream << "}\n";
}
return true;
}
}
bool GenerateTestSpecFromTensorflowModel(
std::iostream& stream, const string& tensorflow_model_path,
const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
output_layer);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.LoadModel(tensorflow_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
stream << "load_model: " << tflite_model_path << "\n";
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
bool GenerateTestSpecFromTFLiteModel(
std::iostream& stream, const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
TfLiteDriver runner;
runner.LoadModel(tflite_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.AllocateTensors();
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
}
} | #include "tensorflow/lite/testing/generate_testspec.h"
#include <random>
#include <set>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(GenerateRandomTensor, FloatValue) {
std::mt19937 random_engine;
auto random_func = [&]() {
return std::uniform_real_distribution<float>(-0.5, 0.5)(random_engine);
};
std::set<float> values;
float sum_x_square = 0.0f;
float sum_x = 0.0f;
for (int i = 0; i < 100; i++) {
const auto& data = GenerateRandomTensor<float>({1, 3, 4}, random_func);
for (float value : data) {
values.insert(value);
sum_x_square += value * value;
sum_x += value;
}
}
EXPECT_GT(values.size(), 200);
int num = 1 * 3 * 4 * 100;
float stddev = sum_x_square / num - (sum_x / num) * (sum_x / num);
float minstddev = 1.0f / 12 / 2;
EXPECT_GT(stddev, minstddev);
}
}
}
} |
790 | cpp | tensorflow/tensorflow | tokenize | tensorflow/lite/testing/tokenize.cc | tensorflow/lite/testing/tokenize_test.cc | #ifndef TENSORFLOW_LITE_TESTING_TOKENIZE_H_
#define TENSORFLOW_LITE_TESTING_TOKENIZE_H_
#include <istream>
#include <string>
namespace tflite {
namespace testing {
class TokenProcessor {
public:
virtual ~TokenProcessor() {}
virtual void ConsumeToken(std::string* token) = 0;
};
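// Tokenize splits `input` into tokens and feeds them to `processor`:
// '{', '}' and ':' are emitted as single-character tokens, double-quoted runs
// are emitted verbatim with the quotes stripped, and whitespace separates the
// remaining unquoted tokens.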
void Tokenize(std::istream* input, TokenProcessor* processor);
}
}
#endif
#include "tensorflow/lite/testing/tokenize.h"
#include <istream>
#include <string>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
void Tokenize(std::istream* input, TokenProcessor* processor) {
enum State { kBuildQuotedToken, kBuildToken, kIdle };
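  // State machine: kIdle between tokens, kBuildToken while accumulating an
  // unquoted token, kBuildQuotedToken while accumulating characters inside
  // double quotes.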
std::string current_token;
State state = kIdle;
auto start_token = [&](char c) {
state = kBuildToken;
current_token.clear();
current_token = c;
};
auto issue_token = [&]() {
state = kIdle;
    processor->ConsumeToken(&current_token);
current_token.clear();
};
auto start_quoted_token = [&]() {
state = kBuildQuotedToken;
current_token.clear();
};
auto issue_quoted_token = [&]() {
state = kIdle;
    processor->ConsumeToken(&current_token);
current_token.clear();
};
auto issue_delim = [&](char d) {
current_token = string(1, d);
    processor->ConsumeToken(&current_token);
current_token.clear();
};
auto is_delim = [](char c) { return c == '{' || c == '}' || c == ':'; };
auto is_quote = [](char c) { return c == '"'; };
for (auto it = std::istreambuf_iterator<char>(*input);
it != std::istreambuf_iterator<char>(); ++it) {
switch (state) {
case kIdle:
if (is_delim(*it)) {
issue_delim(*it);
} else if (is_quote(*it)) {
start_quoted_token();
} else if (!isspace(*it)) {
start_token(*it);
}
break;
case kBuildToken:
if (is_delim(*it)) {
issue_token();
issue_delim(*it);
} else if (is_quote(*it)) {
issue_token();
start_quoted_token();
} else if (isspace(*it)) {
issue_token();
} else {
current_token += *it;
}
break;
case kBuildQuotedToken:
if (is_quote(*it)) {
issue_quoted_token();
} else {
current_token += *it;
}
break;
}
}
if (state != kIdle) {
issue_token();
}
}
}
} | #include "tensorflow/lite/testing/tokenize.h"
#include <sstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class TokenCollector : public TokenProcessor {
public:
void ConsumeToken(std::string* token) override { tokens_.push_back(*token); }
const std::vector<std::string>& Tokens() { return tokens_; }
private:
std::vector<std::string> tokens_;
};
std::vector<std::string> TokenizeString(const std::string& s) {
std::stringstream ss(s);
TokenCollector collector;
Tokenize(&ss, &collector);
return collector.Tokens();
}
TEST(TokenizeTest, TokenDetection) {
EXPECT_THAT(TokenizeString("x :1"), ElementsAre("x", ":", "1"));
EXPECT_THAT(TokenizeString("x:1"), ElementsAre("x", ":", "1"));
EXPECT_THAT(TokenizeString("x {1"), ElementsAre("x", "{", "1"));
EXPECT_THAT(TokenizeString("x{1"), ElementsAre("x", "{", "1"));
EXPECT_THAT(TokenizeString("x }1"), ElementsAre("x", "}", "1"));
EXPECT_THAT(TokenizeString("x}1"), ElementsAre("x", "}", "1"));
EXPECT_THAT(TokenizeString("x \"1"), ElementsAre("x", "1"));
EXPECT_THAT(TokenizeString("x\"1"), ElementsAre("x", "1"));
}
TEST(TokenizeTest, QuotedTokenDetection) {
EXPECT_THAT(TokenizeString("\"w:x{y}z\"1"), ElementsAre("w:x{y}z", "1"));
EXPECT_THAT(TokenizeString("\"w:x{y}z\"\"1\""), ElementsAre("w:x{y}z", "1"));
}
TEST(TokenizeTest, Delimiters) {
EXPECT_THAT(TokenizeString("}"), ElementsAre("}"));
EXPECT_THAT(TokenizeString("}}"), ElementsAre("}", "}"));
EXPECT_THAT(TokenizeString("{"), ElementsAre("{"));
EXPECT_THAT(TokenizeString("{{"), ElementsAre("{", "{"));
EXPECT_THAT(TokenizeString(":"), ElementsAre(":"));
EXPECT_THAT(TokenizeString("::"), ElementsAre(":", ":"));
}
TEST(TokenizeTest, CornerCases) {
EXPECT_THAT(TokenizeString(" i { b:a } "),
ElementsAre("i", "{", "b", ":", "a", "}"));
EXPECT_THAT(TokenizeString(" }"), ElementsAre("}"));
EXPECT_THAT(TokenizeString(" } "), ElementsAre("}"));
EXPECT_THAT(TokenizeString(" {} "), ElementsAre("{", "}"));
EXPECT_THAT(TokenizeString(" x{} y{} "),
ElementsAre("x", "{", "}", "y", "{", "}"));
EXPECT_THAT(TokenizeString("x:1 y:2 "),
ElementsAre("x", ":", "1", "y", ":", "2"));
EXPECT_THAT(TokenizeString("x:\"1\" y:2 "),
ElementsAre("x", ":", "1", "y", ":", "2"));
EXPECT_THAT(TokenizeString("x:\"1, 2\" y:\"\" "),
ElementsAre("x", ":", "1, 2", "y", ":", ""));
}
TEST(TokenizeTest, NewLines) {
EXPECT_THAT(TokenizeString("x:\n1,\n 2 \n y :\n3 \n"),
ElementsAre("x", ":", "1,", "2", "y", ":", "3"));
}
TEST(TokenizeTest, LongString) {
EXPECT_THAT(
TokenizeString(" i { b:a } input {"
"a: \"1e-1, 2,3\" b:\"1,2,3\"\n c{ "
"id:1 x{d{a:"
"1}}} f:2 "
"\n}\n t:1"),
ElementsAreArray({"i", "{", "b", ":", "a", "}", "input", "{",
"a", ":", "1e-1, 2,3", "b", ":", "1,2,3", "c", "{",
"id", ":", "1", "x", "{", "d", "{", "a",
":", "1", "}", "}", "}", "f", ":", "2",
"}", "t", ":", "1"}));
}
}
}
} |
791 | cpp | tensorflow/tensorflow | tflite_driver | tensorflow/lite/testing/tflite_driver.cc | tensorflow/lite/testing/tflite_driver_test.cc | #ifndef TENSORFLOW_LITE_TESTING_TFLITE_DRIVER_H_
#define TENSORFLOW_LITE_TESTING_TFLITE_DRIVER_H_
#include <stdlib.h>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/signature_runner.h"
#include "tensorflow/lite/testing/result_expectations.h"
#include "tensorflow/lite/testing/test_runner.h"
namespace tflite {
namespace testing {
class TfLiteDriver : public TestRunner {
public:
enum class DelegateType {
kNone,
kNnapi,
kGpu,
kFlex,
};
static bool InitTestDelegateProviders(int* argc, const char** argv);
explicit TfLiteDriver(DelegateType delegate_type = DelegateType::kNone,
bool reference_kernel = false);
~TfLiteDriver() override;
void LoadModel(const std::string& bin_file_path) override;
void LoadModel(const std::string& bin_file_path,
const std::string& signature) override;
void ReshapeTensor(const std::string& name,
const std::string& csv_values) override;
void ResetTensor(const std::string& name) override;
std::string ReadOutput(const std::string& name) override;
void Invoke(
const std::vector<std::pair<std::string, std::string>>& inputs) override;
bool CheckResults(
const std::vector<std::pair<std::string, std::string>>& expected_outputs,
const std::vector<std::pair<std::string, std::string>>&
expected_output_shapes) override;
std::vector<std::string> GetOutputNames() override;
void AllocateTensors() override;
void SetThreshold(double relative_threshold, double absolute_threshold);
void SetQuantizationErrorMultiplier(int quantization_error_multiplier);
protected:
Interpreter::TfLiteDelegatePtr delegate_;
private:
void SetInput(const std::string& name, const std::string& csv_values);
void SetExpectation(const std::string& name, const std::string& csv_values);
void SetShapeExpectation(const std::string& name,
const std::string& csv_values);
void DeallocateStringTensor(TfLiteTensor* t) {
if (t) {
free(t->data.raw);
t->data.raw = nullptr;
}
}
void AllocateStringTensor(int id, size_t num_bytes, TfLiteTensor* t) {
t->data.raw = reinterpret_cast<char*>(malloc(num_bytes));
t->bytes = num_bytes;
tensors_to_deallocate_[id] = t;
}
void ResetLSTMStateTensors();
std::string TensorValueToCsvString(const TfLiteTensor* tensor);
std::map<std::string, uint32_t> signature_inputs_;
std::map<std::string, uint32_t> signature_outputs_;
std::unique_ptr<OpResolver> resolver_;
std::unique_ptr<FlatBufferModel> model_;
std::unique_ptr<Interpreter> interpreter_;
std::map<int, std::unique_ptr<DataExpectation>> expected_output_;
std::map<int, std::unique_ptr<ShapeExpectation>> expected_output_shape_;
SignatureRunner* signature_runner_ = nullptr;
bool must_allocate_tensors_ = true;
std::map<int, TfLiteTensor*> tensors_to_deallocate_;
double relative_threshold_;
double absolute_threshold_;
int quantization_error_multiplier_;
};
}
}
#endif
#include "tensorflow/lite/testing/tflite_driver.h"
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "Eigen/Core"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/result_expectations.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#if !defined(__APPLE__)
#include "tensorflow/lite/delegates/flex/delegate.h"
#endif
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/gradient/gradient_ops.h"
#include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/register_ref.h"
#include "tensorflow/lite/kernels/test_delegate_providers.h"
#include "tensorflow/lite/signature_runner.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace testing {
namespace {
const double kRelativeThreshold = 1e-2f;
const double kAbsoluteThreshold = 1e-4f;
const char kDefaultSignatureKey[] = "serving_default";
const int kQuantizationErrorMultiplier = 4;
template <typename T>
void SetTensorData(const std::vector<T>& values, void* data) {
T* input_ptr = static_cast<T*>(data);
std::copy(values.begin(), values.end(), input_ptr);
}
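// Returns true when the tensor carries single-scale affine quantization
// (excluding uint8); SetExpectation() then records the expected values as
// floats rather than as the tensor's storage type.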
bool InterpretAsQuantized(const TfLiteTensor& tensor) {
if (tensor.quantization.type == kTfLiteNoQuantization) return false;
if (tensor.type == kTfLiteUInt8) return false;
if (tensor.quantization.params != nullptr) {
auto* quantization =
reinterpret_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (quantization->scale != nullptr && quantization->scale->size == 1 &&
quantization->zero_point != nullptr &&
quantization->zero_point->size == 1) {
return true;
}
}
return false;
}
}
bool TfLiteDriver::InitTestDelegateProviders(int* argc, const char** argv) {
return tflite::KernelTestDelegateProviders::Get()->InitFromCmdlineArgs(argc,
argv);
}
TfLiteDriver::TfLiteDriver(DelegateType delegate_type, bool reference_kernel)
: delegate_(nullptr, nullptr),
relative_threshold_(kRelativeThreshold),
absolute_threshold_(kAbsoluteThreshold),
quantization_error_multiplier_(kQuantizationErrorMultiplier) {
if (reference_kernel) {
resolver_ = std::make_unique<ops::builtin::BuiltinRefOpResolver>();
} else {
resolver_ = std::make_unique<
ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
ops::builtin::BuiltinOpResolver* builtin_op_resolver_ =
reinterpret_cast<ops::builtin::BuiltinOpResolver*>(resolver_.get());
builtin_op_resolver_->AddCustom("IRFFT2D",
tflite::ops::custom::Register_IRFFT2D());
builtin_op_resolver_->AddCustom(
"AvgPool3D", tflite::ops::custom::Register_AVG_POOL_3D());
builtin_op_resolver_->AddCustom(
"MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
builtin_op_resolver_->AddCustom("Roll",
tflite::ops::custom::Register_ROLL());
tflite::ops::custom::AddGradientOps(builtin_op_resolver_);
tflite::ops::custom::AddParseExampleOp(builtin_op_resolver_);
tflite::ops::custom::AddPerceptionOps(builtin_op_resolver_);
}
switch (delegate_type) {
case DelegateType::kNone:
break;
case DelegateType::kNnapi:
delegate_ = evaluation::CreateNNAPIDelegate();
break;
case DelegateType::kGpu:
delegate_ = evaluation::CreateGPUDelegate();
break;
case DelegateType::kFlex:
#if !defined(__APPLE__)
delegate_ = FlexDelegate::Create();
#endif
break;
}
}
TfLiteDriver::~TfLiteDriver() {
for (auto t : tensors_to_deallocate_) {
DeallocateStringTensor(t.second);
}
}
void TfLiteDriver::AllocateTensors() {
if (must_allocate_tensors_) {
if (interpreter_->AllocateTensors() != kTfLiteOk) {
Invalidate("Failed to allocate tensors");
return;
}
ResetLSTMStateTensors();
must_allocate_tensors_ = false;
}
}
void TfLiteDriver::LoadModel(const std::string& bin_file_path,
const std::string& signature) {
if (!IsValid()) return;
model_ = FlatBufferModel::BuildFromFile(GetFullPath(bin_file_path).c_str());
if (!model_) {
Invalidate("Failed to mmap model " + bin_file_path);
return;
}
InterpreterBuilder(*model_, *resolver_)(&interpreter_);
if (!interpreter_) {
Invalidate("Failed build interpreter");
return;
}
if (delegate_) {
if (interpreter_->ModifyGraphWithDelegate(delegate_.get()) != kTfLiteOk) {
Invalidate("Unable to the build graph using the delegate");
return;
}
} else {
auto* delegate_providers = tflite::KernelTestDelegateProviders::Get();
for (auto& one : delegate_providers->CreateAllDelegates()) {
if (interpreter_->ModifyGraphWithDelegate(std::move(one.delegate)) !=
kTfLiteOk) {
Invalidate(
"Unable to the build graph using the delegate initialized from "
"tflite::KernelTestDelegateProviders");
return;
}
}
}
must_allocate_tensors_ = true;
signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
if (signature_runner_) {
signature_inputs_ = interpreter_->signature_inputs(signature.c_str());
signature_outputs_ = interpreter_->signature_outputs(signature.c_str());
} else {
Invalidate("Unable to the fetch signature runner.");
}
}
void TfLiteDriver::LoadModel(const std::string& bin_file_path) {
LoadModel(bin_file_path, kDefaultSignatureKey);
}
void TfLiteDriver::ReshapeTensor(const std::string& name,
const std::string& csv_values) {
if (!IsValid()) return;
if (signature_runner_->ResizeInputTensor(
name.c_str(), testing::Split<int>(csv_values, ",")) != kTfLiteOk) {
Invalidate("Failed to resize input tensor " + name);
return;
}
must_allocate_tensors_ = true;
}
void TfLiteDriver::ResetTensor(const std::string& name) {
if (!IsValid()) return;
auto* tensor = signature_runner_->input_tensor(name.c_str());
memset(tensor->data.raw, 0, tensor->bytes);
}
void TfLiteDriver::Invoke(
const std::vector<std::pair<std::string, std::string>>& inputs) {
if (!IsValid()) return;
for (const auto& input : inputs) {
SetInput(input.first, input.second);
}
if (signature_runner_->Invoke() != kTfLiteOk) {
Invalidate("Failed to invoke interpreter");
}
}
std::string TfLiteDriver::ReadOutput(const std::string& name) {
if (!IsValid()) return "";
return TensorValueToCsvString(signature_runner_->output_tensor(name.c_str()));
}
bool TfLiteDriver::CheckResults(
const std::vector<std::pair<std::string, std::string>>& expected_outputs,
const std::vector<std::pair<std::string, std::string>>&
expected_output_shapes) {
if (!IsValid()) return false;
bool success = true;
for (const auto& output : expected_outputs) {
SetExpectation(output.first, output.second);
}
for (const auto& shape : expected_output_shapes) {
SetShapeExpectation(shape.first, shape.second);
}
for (const auto& p : expected_output_) {
int id = p.first;
auto* tensor = interpreter_->tensor(id);
if (!p.second->Check(false, *tensor)) {
std::cerr << "TfLiteDriver: There were errors in invocation '"
<< GetInvocationId() << "', validating output tensor '" << id
<< "':" << std::endl;
p.second->Check(true, *tensor);
success = false;
SetOverallSuccess(false);
}
}
for (const auto& p : expected_output_shape_) {
int id = p.first;
auto* tensor = interpreter_->tensor(id);
if (!p.second->CheckShape(false, *tensor)) {
std::cerr << "TfLiteDriver: There were errors in invocation '"
<< GetInvocationId()
<< "', validating the shape of output tensor '" << id
<< "':" << std::endl;
p.second->CheckShape(true, *tensor);
success = false;
SetOverallSuccess(false);
}
}
expected_output_.clear();
return success;
}
std::vector<std::string> TfLiteDriver::GetOutputNames() {
if (!IsValid()) return {};
std::vector<std::string> names;
for (const auto* name : signature_runner_->output_names()) {
names.push_back(name);
}
return names;
}
void TfLiteDriver::SetInput(const std::string& name,
const std::string& csv_values) {
auto id = signature_inputs_[name];
auto* tensor = signature_runner_->input_tensor(name.c_str());
switch (tensor->type) {
case kTfLiteFloat64: {
const auto& values = testing::Split<double>(csv_values, ",");
if (!CheckSizes<double>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteFloat32: {
const auto& values = testing::Split<float>(csv_values, ",");
if (!CheckSizes<float>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt32: {
const auto& values = testing::Split<int32_t>(csv_values, ",");
if (!CheckSizes<int32_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt32: {
const auto& values = testing::Split<uint32_t>(csv_values, ",");
if (!CheckSizes<uint32_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt64: {
const auto& values = testing::Split<int64_t>(csv_values, ",");
if (!CheckSizes<int64_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt64: {
const auto& values = testing::Split<uint64_t>(csv_values, ",");
if (!CheckSizes<uint64_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt8: {
const auto& values = testing::Split<uint8_t>(csv_values, ",");
if (!CheckSizes<uint8_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt8: {
const auto& values = testing::Split<int8_t>(csv_values, ",");
if (!CheckSizes<int8_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteInt16: {
const auto& values = testing::Split<int16_t>(csv_values, ",");
if (!CheckSizes<int16_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteUInt16: {
const auto& values = testing::Split<uint16_t>(csv_values, ",");
if (!CheckSizes<uint16_t>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteBool: {
const auto& values = testing::Split<bool>(csv_values, ",");
if (!CheckSizes<bool>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteString: {
std::string s = absl::HexStringToBytes(csv_values);
DeallocateStringTensor(tensors_to_deallocate_[id]);
AllocateStringTensor(id, s.size(), tensor);
memcpy(tensor->data.raw, s.data(), s.size());
break;
}
case kTfLiteComplex64: {
const auto& values = testing::Split<std::complex<float>>(csv_values, ",");
if (!CheckSizes<std::complex<float>>(tensor->bytes, values.size()))
return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteComplex128: {
const auto& values =
testing::Split<std::complex<double>>(csv_values, ",");
if (!CheckSizes<std::complex<double>>(tensor->bytes, values.size()))
return;
SetTensorData(values, tensor->data.raw);
break;
}
case kTfLiteFloat16: {
const auto& values = testing::Split<Eigen::half>(csv_values, ",");
for (auto k : values) {
TFLITE_LOG(INFO) << "input" << k;
}
if (!CheckSizes<Eigen::half>(tensor->bytes, values.size())) return;
SetTensorData(values, tensor->data.raw);
break;
}
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::SetInput"));
return;
}
}
void TfLiteDriver::SetThreshold(double relative_threshold,
double absolute_threshold) {
relative_threshold_ = relative_threshold;
absolute_threshold_ = absolute_threshold;
}
void TfLiteDriver::SetQuantizationErrorMultiplier(
int quantization_error_multiplier) {
quantization_error_multiplier_ = quantization_error_multiplier;
}
void TfLiteDriver::SetExpectation(const std::string& name,
const std::string& csv_values) {
auto id = signature_outputs_[name];
auto* tensor = signature_runner_->output_tensor(name.c_str());
if (expected_output_.count(id) != 0) {
Invalidate(absl::StrCat("Overridden expectation for tensor '", id, "'"));
}
expected_output_[id] = std::make_unique<DataExpectation>(
relative_threshold_, absolute_threshold_, quantization_error_multiplier_);
if (InterpretAsQuantized(*tensor)) {
expected_output_[id]->SetData<float>(csv_values);
return;
}
switch (tensor->type) {
case kTfLiteFloat32:
expected_output_[id]->SetData<float>(csv_values);
break;
case kTfLiteInt32:
expected_output_[id]->SetData<int32_t>(csv_values);
break;
case kTfLiteUInt32:
expected_output_[id]->SetData<uint32_t>(csv_values);
break;
case kTfLiteInt64:
expected_output_[id]->SetData<int64_t>(csv_values);
break;
case kTfLiteUInt64:
expected_output_[id]->SetData<uint64_t>(csv_values);
break;
case kTfLiteUInt8:
expected_output_[id]->SetData<uint8_t>(csv_values);
break;
case kTfLiteInt8:
expected_output_[id]->SetData<int8_t>(csv_values);
break;
case kTfLiteUInt16:
expected_output_[id]->SetData<uint16_t>(csv_values);
break;
case kTfLiteInt16:
expected_output_[id]->SetData<int16_t>(csv_values);
break;
case kTfLiteBool:
expected_output_[id]->SetData<bool>(csv_values);
break;
case kTfLiteString:
expected_output_[id]->SetData<std::string>(csv_values);
break;
case kTfLiteFloat64:
expected_output_[id]->SetData<double>(csv_values);
break;
case kTfLiteComplex64:
expected_output_[id]->SetData<std::complex<float>>(csv_values);
break;
case kTfLiteComplex128:
expected_output_[id]->SetData<std::complex<double>>(csv_values);
break;
case kTfLiteFloat16:
expected_output_[id]->SetData<Eigen::half>(csv_values);
break;
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::SetExpectation"));
return;
}
}
void TfLiteDriver::SetShapeExpectation(const std::string& name,
const std::string& csv_values) {
auto id = signature_outputs_[name];
if (expected_output_shape_.count(id) != 0) {
Invalidate(
absl::StrCat("Overridden shape expectation for tensor '", id, "'"));
}
expected_output_shape_[id] = std::make_unique<ShapeExpectation>(csv_values);
}
void TfLiteDriver::ResetLSTMStateTensors() {
interpreter_->ResetVariableTensors();
}
std::string TfLiteDriver::TensorValueToCsvString(const TfLiteTensor* tensor) {
int num_elements = 1;
for (int i = 0; i < tensor->dims->size; ++i) {
num_elements *= tensor->dims->data[i];
}
switch (tensor->type) {
case kTfLiteFloat32:
return JoinDefault(tensor->data.f, num_elements, ",");
case kTfLiteInt32:
return JoinDefault(tensor->data.i32, num_elements, ",");
case kTfLiteUInt32:
return JoinDefault(tensor->data.u32, num_elements, ",");
case kTfLiteInt64:
return JoinDefault(tensor->data.i64, num_elements, ",");
case kTfLiteUInt64:
return JoinDefault(tensor->data.u64, num_elements, ",");
case kTfLiteUInt8:
return Join(tensor->data.uint8, num_elements, ",");
case kTfLiteInt8:
return Join(tensor->data.int8, num_elements, ",");
case kTfLiteUInt16:
return Join(tensor->data.ui16, num_elements, ",");
case kTfLiteInt16:
return Join(tensor->data.i16, num_elements, ",");
case kTfLiteBool:
return JoinDefault(tensor->data.b, num_elements, ",");
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
TfLiteTypeGetName(tensor->type),
" in TfLiteDriver::ReadOutput"));
return "";
}
}
}
} | #include "tensorflow/lite/testing/tflite_driver.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/test_runner.h"
namespace tflite {
namespace testing {
namespace {
using ::testing::ElementsAre;
TEST(TfliteDriverTest, SimpleTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver);
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin", "serving_default");
ASSERT_TRUE(runner->IsValid());
ASSERT_THAT(runner->GetOutputNames(), ElementsAre("x", "y"));
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_EQ(runner->ReadOutput("x"), "0.101,0.202,0.303,0.404");
ASSERT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,0.044");
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,0.202,0.303,0.404"}, {"y", "0.011,0.022,0.033,0.044"}},
{}));
}
TEST(TfliteDriverTest, SingleAddOpTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver(
TfLiteDriver::DelegateType::kNone,
true));
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin");
ASSERT_TRUE(runner->IsValid());
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,0.202,0.303,0.404"}, {"y", "0.011,0.022,0.033,0.044"}},
{}));
EXPECT_EQ(runner->ReadOutput("x"), "0.101,0.202,0.303,0.404");
EXPECT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,0.044");
}
TEST(TfliteDriverTest, AddOpWithNaNTest) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver(
TfLiteDriver::DelegateType::kNone,
true));
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/multi_add.bin");
ASSERT_TRUE(runner->IsValid());
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,nan,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,nan"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults(
{{"x", "0.101,nan,0.303,0.404"}, {"y", "0.011,0.022,0.033,nan"}},
{}));
EXPECT_EQ(runner->ReadOutput("x"), "0.101,nan,0.303,0.404");
EXPECT_EQ(runner->ReadOutput("y"), "0.011,0.022,0.033,nan");
}
TEST(TfliteDriverTest, AddQuantizedInt8Test) {
std::unique_ptr<TestRunner> runner(new TfLiteDriver());
runner->SetModelBaseDir("tensorflow/lite");
runner->LoadModel("testdata/add_quantized_int8.bin");
ASSERT_TRUE(runner->IsValid());
runner->ReshapeTensor("a", "1,2,2,1");
ASSERT_TRUE(runner->IsValid());
runner->AllocateTensors();
runner->Invoke({{"a", "1,1,1,1"}});
ASSERT_TRUE(runner->IsValid());
ASSERT_TRUE(runner->CheckResults({{"x", "0.0117,0.0117,0.0117,0.0117"}}, {}));
EXPECT_EQ(runner->ReadOutput("x"), "3,3,3,3");
}
}
}
} |
792 | cpp | tensorflow/tensorflow | message | tensorflow/lite/testing/message.cc | tensorflow/lite/testing/message_test.cc | #ifndef TENSORFLOW_LITE_TESTING_MESSAGE_H_
#define TENSORFLOW_LITE_TESTING_MESSAGE_H_
#include <memory>
#include <string>
#include <vector>
namespace tflite {
namespace testing {
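// Message is one node of a simple tree built while reading a textual test
// spec. Subclasses override SetField/AddChild/Finish to capture "key: value"
// fields and nested child messages; Read() drives the parse via Tokenize().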
class Message {
public:
static bool Read(std::istream* input, Message* message);
Message() {}
virtual ~Message() {}
virtual void SetField(const std::string& name, const std::string& value) {}
virtual Message* AddChild(const std::string& name) { return nullptr; }
virtual void Finish() {}
protected:
Message* Store(Message* n) {
children_.emplace_back(n);
return n;
}
const std::vector<std::unique_ptr<Message>>& Children() const {
return children_;
}
private:
std::vector<std::unique_ptr<Message>> children_;
};
}
}
#endif
#include "tensorflow/lite/testing/message.h"
#include <stack>
#include <string>
#include "tensorflow/lite/testing/tokenize.h"
namespace tflite {
namespace testing {
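// MessageStack consumes the tokens produced by Tokenize(): '{' pushes a new
// child of the current node, '}' finishes and pops it, and "key : value"
// sequences become fields on the node at the top of the stack.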
class MessageStack : public TokenProcessor {
public:
explicit MessageStack(Message* first_node) {
nodes_.push(first_node);
valid_ = true;
}
void ConsumeToken(std::string* token) override {
if (!valid_) return;
Message* current_node = nodes_.top();
if (*token == "{") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
nodes_.push(current_node ? current_node->AddChild(previous_token_)
: nullptr);
previous_token_.clear();
} else if (*token == "}") {
if (nodes_.size() == 1 || !previous_token_.empty()) {
valid_ = false;
return;
}
if (current_node) {
current_node->Finish();
}
nodes_.pop();
} else if (*token == ":") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
} else {
if (previous_token_.empty()) {
previous_token_.swap(*token);
} else {
if (current_node) {
current_node->SetField(previous_token_, *token);
}
previous_token_.clear();
}
}
}
bool valid() const { return valid_; }
private:
std::stack<Message*> nodes_;
std::string previous_token_;
bool valid_;
};
bool Message::Read(std::istream* input, Message* message) {
MessageStack stack(message);
Tokenize(input, &stack);
return stack.valid();
}
}
} | #include "tensorflow/lite/testing/message.h"
#include <map>
#include <sstream>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
class TestMessage : public Message {
public:
TestMessage() {}
explicit TestMessage(const std::string& text_to_parse) {
std::stringstream ss(text_to_parse);
finished_ = Message::Read(&ss, this);
}
void SetField(const std::string& name, const std::string& value) override {
fields_[name] = value;
}
Message* AddChild(const std::string& name) override {
TestMessage* m = new TestMessage;
m->name_ = name;
return Store(m);
}
void Finish() override { finished_ = true; }
int NumChildren() const { return Children().size(); }
const TestMessage* GetChild(int i) const {
return dynamic_cast<TestMessage*>(Children()[i].get());
}
int NumFields() const { return fields_.size(); }
const std::string& GetField(const std::string& key) const {
return fields_.at(key);
}
const std::string& name() const { return name_; }
bool finished() const { return finished_; }
protected:
std::string name_;
std::map<std::string, std::string> fields_;
bool finished_ = false;
};
TEST(MessageTest, Simple) {
TestMessage message("x{a:1 b:2} y{} z{c:3} d:4");
ASSERT_TRUE(message.finished());
ASSERT_EQ(message.NumFields(), 1);
EXPECT_EQ(message.GetField("d"), "4");
ASSERT_EQ(message.NumChildren(), 3);
auto* x = message.GetChild(0);
EXPECT_EQ(x->name(), "x");
ASSERT_EQ(x->NumFields(), 2);
EXPECT_EQ(x->GetField("a"), "1");
EXPECT_EQ(x->GetField("b"), "2");
auto* y = message.GetChild(1);
EXPECT_EQ(y->name(), "y");
ASSERT_EQ(y->NumFields(), 0);
auto* z = message.GetChild(2);
EXPECT_EQ(z->name(), "z");
ASSERT_EQ(z->NumFields(), 1);
EXPECT_EQ(z->GetField("c"), "3");
}
TEST(MessageTest, Unnamed) {
TestMessage message("x{c:3} {} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, TooManyBraces) {
TestMessage message("x{c:3} } y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, LeftoverToken) {
TestMessage message("x{c:3} z{test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingKey) {
TestMessage message("x{c:3} z{:test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingValue) {
TestMessage message("x{c:3} z{test:} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
}
}
} |
793 | cpp | tensorflow/tensorflow | split | tensorflow/lite/testing/split.cc | tensorflow/lite/testing/split_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_SPLIT_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_SPLIT_H_
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/versions.pb.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tools::proto_splitter {
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::VersionDef;
class Splitter {
public:
virtual ~Splitter() = default;
virtual absl::StatusOr<ChunkedProto> Split() = 0;
virtual absl::Status Write(std::string file_prefix) = 0;
virtual VersionDef Version() = 0;
};
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/split.h"
#include <map>
#include <string>
#include <vector>
namespace tflite {
namespace gpu {
Split::Split(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr, const std::vector<int>& channels)
: GPUOperation(definition), attr_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = attr.axis == Axis::CHANNELS ? GetSplitChannelsCode(gpu_info, channels)
: GetSplitCode();
}
std::string Split::GetSplitCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string task_width =
attr_.axis == Axis::WIDTH ? "1" : "args.src_tensor.Width()";
const std::string task_height =
attr_.axis == Axis::HEIGHT ? "1" : "args.src_tensor.Height()";
const std::string task_depth =
attr_.axis == Axis::DEPTH ? "1" : "args.src_tensor.Depth()";
const std::string task_batch =
attr_.axis == Axis::BATCH ? "1" : "args.src_tensor.Batch()";
const std::string task_slices =
attr_.axis == Axis::CHANNELS ? "1" : "args.src_tensor.Slices()";
std::map<Axis, std::string> axis_to_selector = {
{Axis::WIDTH, "Width"}, {Axis::HEIGHT, "Height"},
{Axis::DEPTH, "Depth"}, {Axis::CHANNELS, "Slices"},
{Axis::BATCH, "Batch"},
};
std::map<Axis, std::string> axis_to_coord = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "D"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_batch + ";\n";
c += " int B = linear_id % " + task_batch + ";\n";
c += " if (X >= " + task_width + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_width + ") return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_height + ";\n";
c += " int D = linear_id / " + task_height + ";\n";
c += " if (D >= " + task_depth + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_height + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_slices + ") return;\n";
c += " int src_counter = 0;\n";
std::vector<std::string> src_coords;
for (auto axis :
{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS, Axis::BATCH}) {
if (definition_.src_tensors[0].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "src_counter" : axis_to_coord[axis];
src_coords.push_back(coord_name);
}
}
std::string src_coords_str = src_coords[0];
for (int i = 1; i < src_coords.size(); ++i) {
src_coords_str += ", " + src_coords[i];
}
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
std::vector<std::string> dst_coords;
for (auto axis : {Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS,
Axis::BATCH}) {
if (definition_.dst_tensors[i].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "i" : axis_to_coord[axis];
dst_coords.push_back(coord_name);
}
}
std::string dst_coords_str = dst_coords[0];
for (int j = 1; j < dst_coords.size(); ++j) {
dst_coords_str += ", " + dst_coords[j];
}
const std::string dst_name = "args.dst_tensor_" + std::to_string(i);
c += " for (int i = 0; i < " + dst_name + "." +
axis_to_selector[attr_.axis] + "(); ++i, src_counter++) {\n";
c += " args.src_tensor::type result = args.src_tensor.Read(" +
src_coords_str + ");\n";
c += " " + dst_name + ".Write(result, " + dst_coords_str + ");\n";
c += " }\n";
}
c += "}\n";
return c;
}
std::string Split::GetSplitChannelsCode(const GpuInfo& gpu_info,
const std::vector<int>& channels) {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string batch_coord =
definition_.src_tensors[0].HasAxis(Axis::BATCH) ? ", B" : "";
std::string coords = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.src_tensor.Batch();\n";
c += " int B = linear_id % args.src_tensor.Batch();\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % args.src_tensor.Height();\n";
c += " int Z = linear_id / args.src_tensor.Height();\n";
c += " if (Z >= args.src_tensor.Depth()) return;\n";
coords += ", Z";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= args.src_tensor.Height()) return;\n";
}
int src_channels = 0;
for (auto dst_ch : channels) {
src_channels += dst_ch;
}
const int src_slices = DivideRoundUp(src_channels, 4);
int dst_ch = 0;
int dst_slice = 0;
int dst_tensor = 0;
const std::string postfix[] = {".x", ".y", ".z", ".w"};
c += " args.src_tensor::type dst_val;\n";
for (int s = 0; s < src_slices; ++s) {
c += " if (" + std::to_string(s) + " < args.src_tensor.Slices()) {\n";
c += " args.src_tensor::type src_val = args.src_tensor.Read(" + coords +
", " + std::to_string(s) + batch_coord + ");\n";
for (int k = 0; k < 4 && s * 4 + k < src_channels; ++k) {
c += " dst_val" + postfix[dst_ch % 4] + " = src_val" + postfix[k] +
";\n";
dst_ch++;
if (dst_ch == channels[dst_tensor]) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_tensor += 1;
dst_ch = 0;
dst_slice = 0;
}
if (dst_ch != 0 && dst_ch % 4 == 0) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_slice += 1;
}
}
if (gpu_info.IsMali()) {
c += " } else { return; }\n";
} else {
c += " }\n";
}
}
c += "}\n";
return c;
}
int3 Split::GetGridSize() const {
const int width = attr_.axis == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = attr_.axis == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = attr_.axis == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = attr_.axis == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = attr_.axis == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Split CreateSplit(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr,
const std::vector<int>& channels) {
return Split(gpu_info, definition, attr, channels);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/split_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, SplitChannels) {
auto status = SplitChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SplitChannelsX4) {
auto status = SplitChannelsX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SplitWidth) {
auto status = SplitWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SplitHeight) {
auto status = SplitHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SplitBatch) {
auto status = SplitBatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SplitDepth) {
auto status = SplitDepthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} |
794 | cpp | tensorflow/tensorflow | tf_driver | tensorflow/lite/testing/tf_driver.cc | tensorflow/lite/testing/tf_driver_test.cc | #ifndef TENSORFLOW_LITE_TESTING_TF_DRIVER_H_
#define TENSORFLOW_LITE_TESTING_TF_DRIVER_H_
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/testing/test_runner.h"
namespace tflite {
namespace testing {
class TfDriver : public TestRunner {
public:
explicit TfDriver(const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer);
~TfDriver() override {}
void LoadModel(const string& bin_file_path) override;
void LoadModel(const string& bin_file_path, const string&) override {
LoadModel(bin_file_path);
}
void ReshapeTensor(const string& name, const string& csv_values) override;
void ResetTensor(const std::string& name) override;
string ReadOutput(const string& name) override;
void Invoke(const std::vector<std::pair<string, string>>& inputs) override;
bool CheckResults(
const std::vector<std::pair<string, string>>& expected_outputs,
const std::vector<std::pair<string, string>>& expected_output_shapes)
override {
return true;
}
std::vector<string> GetOutputNames() override { return output_names_; }
void AllocateTensors() override {}
protected:
void SetInput(const string& values_as_string, tensorflow::Tensor*);
string ReadOutput(const tensorflow::Tensor& tensor);
private:
std::unique_ptr<tensorflow::Session> session_;
std::vector<int> input_ids_;
std::vector<string> input_names_;
absl::flat_hash_map<string, int> input_name_to_id_;
std::vector<std::vector<int64_t>> input_shapes_;
std::vector<tensorflow::DataType> input_types_;
std::unordered_map<string, tensorflow::Tensor> input_tensors_;
std::vector<int> output_ids_;
std::vector<string> output_names_;
absl::flat_hash_map<string, int> output_name_to_id_;
std::vector<::tensorflow::Tensor> output_tensors_;
};
}
}
#endif
#include "tensorflow/lite/testing/tf_driver.h"
#include <fstream>
#include <iostream>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
tensorflow::Tensor CreateTensor(const tensorflow::DataType type,
const std::vector<int64_t>& dim) {
tensorflow::TensorShape shape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(dim.data()), dim.size()}};
return {type, shape};
}
template <typename T>
int FillTensorWithData(tensorflow::Tensor* tensor,
const string& values_as_string) {
const auto& values = testing::Split<T>(values_as_string, ",");
if (values.size() == tensor->NumElements()) {
auto data = tensor->flat<T>();
for (int i = 0; i < values.size(); i++) {
data(i) = values[i];
}
}
return values.size();
}
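// Decodes a hex-encoded TFLite string buffer and copies each contained
// string into the TensorFlow tensor; returns the number of strings found.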
int FillTensorWithTfLiteHexString(tensorflow::Tensor* tensor,
const string& values_as_string) {
string s = absl::HexStringToBytes(values_as_string);
int num_strings = values_as_string.empty() ? 0 : GetStringCount(s.data());
if (num_strings == tensor->NumElements()) {
auto data = tensor->flat<tensorflow::tstring>();
for (size_t i = 0; i < num_strings; ++i) {
auto ref = GetString(s.data(), i);
data(i).assign(ref.str, ref.len);
}
}
return num_strings;
}
template <typename T>
void FillTensorWithZeros(tensorflow::Tensor* tensor) {
auto data = tensor->flat<T>();
for (int i = 0; i < tensor->NumElements(); i++) {
data(i) = 0;
}
}
template <typename T>
string TensorDataToCsvString(const tensorflow::Tensor& tensor) {
const auto& data = tensor.flat<T>();
return Join(data.data(), data.size(), ",");
}
string TensorDataToTfLiteHexString(const tensorflow::Tensor& tensor) {
DynamicBuffer dynamic_buffer;
auto data = tensor.flat<tensorflow::tstring>();
for (int i = 0; i < tensor.NumElements(); ++i) {
dynamic_buffer.AddString(data(i).data(), data(i).size());
}
char* char_buffer = nullptr;
size_t size = dynamic_buffer.WriteToBuffer(&char_buffer);
string s = absl::BytesToHexString({char_buffer, size});
free(char_buffer);
return s;
}
}
TfDriver::TfDriver(const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer)
: input_names_(input_layer), output_names_(output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
input_ids_.resize(input_layer.size());
input_tensors_.reserve(input_layer.size());
input_types_.resize(input_layer.size());
input_shapes_.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
input_ids_[i] = i;
input_tensors_[input_layer[i]] = {};
CHECK(DataTypeFromString(input_layer_type[i], &input_types_[i]));
input_shapes_[i] = Split<int64_t>(input_layer_shape[i], ",");
input_name_to_id_[input_layer[i]] = i;
}
output_ids_.resize(output_layer.size());
output_tensors_.reserve(output_layer.size());
for (int i = 0; i < output_layer.size(); i++) {
output_ids_[i] = i;
output_name_to_id_[output_layer[i]] = i;
}
}
void TfDriver::LoadModel(const string& bin_file_path) {
if (!IsValid()) return;
std::ifstream model(bin_file_path);
if (model.fail()) {
Invalidate("Failed to find the model " + bin_file_path);
return;
}
tensorflow::GraphDef graphdef;
if (!graphdef.ParseFromIstream(&model)) {
Invalidate("Failed to parse tensorflow graphdef");
return;
}
tensorflow::SessionOptions options;
session_.reset(tensorflow::NewSession(options));
auto status = session_->Create(graphdef);
if (!status.ok()) {
Invalidate(absl::StrCat("Failed to create session. ", status.message()));
}
}
void TfDriver::ReshapeTensor(const string& name, const string& csv_values) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
input_shapes_[id] = Split<int64_t>(csv_values, ",");
input_tensors_[input_names_[id]] =
CreateTensor(input_types_[id], input_shapes_[id]);
ResetTensor(name);
}
void TfDriver::ResetTensor(const std::string& name) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
auto tensor = input_tensors_[input_names_[id]];
switch (input_types_[id]) {
case tensorflow::DT_FLOAT: {
FillTensorWithZeros<float>(&tensor);
break;
}
case tensorflow::DT_INT32: {
FillTensorWithZeros<int32_t>(&tensor);
break;
}
default:
Invalidate(absl::StrCat("Unsupported tensor type ", input_types_[id],
tensorflow::DataType_Name(input_types_[id]),
" in ResetInput"));
return;
}
}
string TfDriver::ReadOutput(const string& name) {
if (!IsValid()) return "";
return ReadOutput(output_tensors_[output_name_to_id_[name]]);
}
void TfDriver::Invoke(const std::vector<std::pair<string, string>>& inputs) {
if (!IsValid()) return;
for (const auto& input : inputs) {
auto id = input_name_to_id_[input.first];
auto tensor = CreateTensor(input_types_[id], input_shapes_[id]);
SetInput(input.second, &tensor);
input_tensors_[input_names_[id]] = tensor;
}
auto status = session_->Run({input_tensors_.begin(), input_tensors_.end()},
output_names_, {}, &output_tensors_);
if (!status.ok()) {
Invalidate(
absl::StrCat("TensorFlow failed to run graph:", status.message()));
}
}
void TfDriver::SetInput(const string& values_as_string,
tensorflow::Tensor* tensor) {
int num_values_available = 0;
switch (tensor->dtype()) {
case tensorflow::DT_FLOAT:
num_values_available =
FillTensorWithData<float>(tensor, values_as_string);
break;
case tensorflow::DT_INT32:
num_values_available =
FillTensorWithData<int32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT32:
num_values_available =
FillTensorWithData<uint32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT8:
num_values_available =
FillTensorWithData<uint8_t>(tensor, values_as_string);
break;
case tensorflow::DT_STRING:
num_values_available =
FillTensorWithTfLiteHexString(tensor, values_as_string);
break;
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor->dtype()),
" in SetInput"));
return;
}
if (tensor->NumElements() != num_values_available) {
Invalidate(absl::StrCat("Needed ", tensor->NumElements(),
" values for input tensor, but was given ",
num_values_available, " instead."));
}
}
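// Serializes a tensor to a comma-separated string (a hex-encoded TfLite
// string buffer for DT_STRING). Unsupported types invalidate the driver.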
string TfDriver::ReadOutput(const tensorflow::Tensor& tensor) {
switch (tensor.dtype()) {
case tensorflow::DT_FLOAT:
return TensorDataToCsvString<float>(tensor);
case tensorflow::DT_INT32:
return TensorDataToCsvString<int32_t>(tensor);
case tensorflow::DT_UINT32:
return TensorDataToCsvString<uint32_t>(tensor);
case tensorflow::DT_INT64:
return TensorDataToCsvString<int64_t>(tensor);
case tensorflow::DT_UINT8:
return TensorDataToCsvString<uint8_t>(tensor);
case tensorflow::DT_STRING:
return TensorDataToTfLiteHexString(tensor);
case tensorflow::DT_BOOL:
return TensorDataToCsvString<bool>(tensor);
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor.dtype()),
" in ReadOutput"));
return "";
}
}
}
} | #include "tensorflow/lite/testing/tf_driver.h"
#include <algorithm>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace testing {
namespace {
class TestDriver : public TfDriver {
public:
TestDriver() : TfDriver({}, {}, {}, {}) {}
string WriteAndReadBack(tensorflow::DataType type,
const std::vector<int64_t>& shape,
const string& values) {
tensorflow::Tensor t = {
type,
tensorflow::TensorShape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(shape.data()), shape.size()}}};
SetInput(values, &t);
return ReadOutput(t);
}
};
TEST(TfDriverTest, ReadingAndWritingValues) {
TestDriver driver;
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_FLOAT, {1, 2, 2},
"0.10,0.20,0.30,0.40"),
"0.100000001,0.200000003,0.300000012,0.400000006");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_INT32, {1, 2, 2},
"10,40,100,-100"),
"10,40,100,-100");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_UINT8, {1, 2, 2},
"48,49,121, 122"),
"0,1,y,z");
}
TEST(TfDriverTest, ReadingAndWritingValuesStrings) {
TestDriver driver;
auto set_buffer = [](const std::vector<string>& values, string* buffer) {
DynamicBuffer dynamic_buffer;
for (const string& s : values) {
dynamic_buffer.AddString(s.data(), s.size());
}
char* char_b = nullptr;
int size = dynamic_buffer.WriteToBuffer(&char_b);
*buffer = absl::BytesToHexString(absl::string_view(char_b, size));
free(char_b);
};
string buffer;
set_buffer({"", "", "", ""}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, ""),
buffer);
set_buffer({"AB", "ABC", "X", "YZ"}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
}
TEST(TfDriverTest, SimpleTest) {
std::unique_ptr<TfDriver> runner(
new TfDriver({"a", "b", "c", "d"}, {"float", "float", "float", "float"},
{"1,8,8,3", "1,8,8,3", "1,8,8,3", "1,8,8,3"}, {"x", "y"}));
runner->LoadModel("tensorflow/lite/testdata/multi_add.pb");
EXPECT_TRUE(runner->IsValid()) << runner->GetErrorMessage();
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_EQ(runner->ReadOutput("x"),
"0.101000004,0.202000007,0.303000003,0.404000014");
ASSERT_EQ(runner->ReadOutput("y"),
"0.0109999999,0.0219999999,0.0329999998,0.0439999998");
}
}
}
} |
795 | cpp | tensorflow/tensorflow | input_generator | tensorflow/lite/testing/kernel_test/input_generator.cc | tensorflow/lite/testing/kernel_test/input_generator_test.cc | #ifndef TENSORFLOW_LITE_TESTING_KERNEL_TEST_INPUT_GENERATOR_H_
#define TENSORFLOW_LITE_TESTING_KERNEL_TEST_INPUT_GENERATOR_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/signature_runner.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
class InputGenerator {
public:
InputGenerator() = default;
TfLiteStatus LoadModel(const string& model_dir);
TfLiteStatus LoadModel(const string& model_dir, const string& signature);
TfLiteStatus ReadInputsFromFile(const string& filename);
TfLiteStatus GenerateInput(const string& distribution);
std::vector<std::pair<string, string>> GetInputs() { return inputs_; }
TfLiteStatus WriteInputsToFile(const string& filename);
private:
std::unique_ptr<FlatBufferModel> model_;
std::unique_ptr<Interpreter> interpreter_;
SignatureRunner* signature_runner_ = nullptr;
std::vector<std::pair<string, string>> inputs_;
};
}
}
#endif
#include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <cstdio>
#include <fstream>
#include <limits>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
static constexpr char kDefaultServingSignatureDefKey[] = "serving_default";
template <typename T>
std::vector<T> GenerateRandomTensor(TfLiteIntArray* dims,
const std::function<T(int)>& random_func) {
int64_t num_elements = 1;
for (int i = 0; i < dims->size; i++) {
num_elements *= dims->data[i];
}
std::vector<T> result(num_elements);
for (int i = 0; i < num_elements; i++) {
result[i] = random_func(i);
}
return result;
}
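// Generates a value drawn uniformly at random from [min, max] for every
// element implied by `dims`.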
template <typename T>
std::vector<T> GenerateUniform(TfLiteIntArray* dims, float min, float max) {
auto random_float = [](float min, float max) {
return min + (max - min) * static_cast<float>(rand()) / RAND_MAX;
};
std::function<T(int)> random_t = [&](int) {
return static_cast<T>(random_float(min, max));
};
std::vector<T> data = GenerateRandomTensor(dims, random_t);
return data;
}
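// Generates values from a truncated normal distribution (mean 0.5,
// stddev 1/3, resampled until the draw falls in [0, 1)) rescaled to
// [min, max).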
template <typename T>
std::vector<T> GenerateGaussian(TfLiteIntArray* dims, float min, float max) {
auto random_float = [](float min, float max) {
static std::default_random_engine generator;
static std::normal_distribution<double> distribution(0.5, 1.0 / 3);
auto rand_n = distribution(generator);
while (rand_n < 0 || rand_n >= 1) {
rand_n = distribution(generator);
}
return min + (max - min) * static_cast<float>(rand_n);
};
std::function<T(int)> random_t = [&](int) {
return static_cast<T>(random_float(min, max));
};
std::vector<T> data = GenerateRandomTensor(dims, random_t);
return data;
}
}
TfLiteStatus InputGenerator::LoadModel(const string& model_dir) {
return LoadModel(model_dir, kDefaultServingSignatureDefKey);
}
TfLiteStatus InputGenerator::LoadModel(const string& model_dir,
const string& signature) {
model_ = FlatBufferModel::BuildFromFile(model_dir.c_str());
if (!model_) {
fprintf(stderr, "Cannot load model %s", model_dir.c_str());
return kTfLiteError;
}
::tflite::ops::builtin::BuiltinOpResolver builtin_ops;
InterpreterBuilder(*model_, builtin_ops)(&interpreter_);
if (!interpreter_) {
fprintf(stderr, "Failed to build interpreter.");
return kTfLiteError;
}
signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
if (!signature_runner_) {
fprintf(stderr, "Failed to get SignatureRunner.\n");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus InputGenerator::ReadInputsFromFile(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ifstream input_file(filename);
string input;
while (std::getline(input_file, input, '\n')) {
std::vector<string> parts = Split<string>(input, ":");
if (parts.size() != 2) {
fprintf(stderr, "Expected <name>:<value>, got %s", input.c_str());
return kTfLiteError;
}
inputs_.push_back(std::make_pair(parts[0], parts[1]));
}
input_file.close();
return kTfLiteOk;
}
TfLiteStatus InputGenerator::WriteInputsToFile(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ofstream output_file;
output_file.open(filename, std::fstream::out | std::fstream::trunc);
if (!output_file) {
fprintf(stderr, "Failed to open output file %s.", filename.c_str());
return kTfLiteError;
}
for (const auto& input : inputs_) {
output_file << input.first << ":" << input.second << "\n";
}
output_file.close();
return kTfLiteOk;
}
TfLiteStatus InputGenerator::GenerateInput(const string& distribution) {
auto input_tensor_names = signature_runner_->input_names();
for (const char* name : input_tensor_names) {
auto* tensor = signature_runner_->input_tensor(name);
if (distribution == "UNIFORM") {
switch (tensor->type) {
case kTfLiteInt8: {
auto data = GenerateUniform<int8_t>(
tensor->dims, std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteUInt8: {
auto data = GenerateUniform<uint8_t>(
tensor->dims, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteFloat32: {
auto data = GenerateUniform<float>(tensor->dims, -1, 1);
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
default:
fprintf(stderr, "Unsupported input tensor type %s.",
TfLiteTypeGetName(tensor->type));
break;
}
} else if (distribution == "GAUSSIAN") {
switch (tensor->type) {
case kTfLiteInt8: {
auto data = GenerateGaussian<int8_t>(
tensor->dims, std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteUInt8: {
auto data = GenerateGaussian<uint8_t>(
tensor->dims, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
case kTfLiteFloat32: {
auto data = GenerateGaussian<float>(tensor->dims, -1, 1);
inputs_.push_back(
std::make_pair(name, Join(data.data(), data.size(), ",")));
break;
}
default:
fprintf(stderr, "Unsupported input tensor type %s.",
TfLiteTypeGetName(tensor->type));
break;
}
} else {
fprintf(stderr, "Unsupported distribution %s.", distribution.c_str());
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(InputGeneratorTest, LoadModel) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
}
TEST(InputGeneratorTest, ReadWriteSimpleFile) {
InputGenerator input_generator;
ASSERT_EQ(
input_generator.ReadInputsFromFile("tensorflow/lite/testing/"
"kernel_test/testdata/test_input.csv"),
kTfLiteOk);
std::string content = "1";
for (int i = 0; i < 1 * 8 * 8 * 3 - 1; i++) {
content.append(",1");
}
std::vector<std::pair<string, string>> inputs = {{"a", content}};
ASSERT_EQ(input_generator.GetInputs(), inputs);
auto output_filename = ::testing::TempDir() + "/out.csv";
ASSERT_EQ(input_generator.WriteInputsToFile(output_filename), kTfLiteOk);
std::ifstream in(output_filename);
std::string out;
std::getline(in, out, '\n');
std::string expected_out = "a:";
expected_out.append(content);
ASSERT_EQ(out, expected_out);
}
TEST(InputGeneratorTest, GenerateUniformInput) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
input_generator.GenerateInput("UNIFORM");
auto inputs = input_generator.GetInputs();
ASSERT_EQ(inputs.size(), 4);
}
TEST(InputGeneratorTest, GenerateGaussianInput) {
InputGenerator input_generator;
ASSERT_EQ(input_generator.LoadModel(
"tensorflow/lite/testdata/multi_add.bin"),
kTfLiteOk);
input_generator.GenerateInput("GAUSSIAN");
auto inputs = input_generator.GetInputs();
ASSERT_EQ(inputs.size(), 4);
}
}
}
} |
796 | cpp | tensorflow/tensorflow | diff_analyzer | tensorflow/lite/testing/kernel_test/diff_analyzer.cc | tensorflow/lite/testing/kernel_test/diff_analyzer_test.cc | #ifndef TENSORFLOW_LITE_TESTING_KERNEL_TEST_DIFF_ANALYZER_H_
#define TENSORFLOW_LITE_TESTING_KERNEL_TEST_DIFF_ANALYZER_H_
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
class DiffAnalyzer {
public:
DiffAnalyzer() = default;
TfLiteStatus ReadFiles(const string& base, const string& test);
TfLiteStatus WriteReport(const string& filename);
private:
std::unordered_map<string, std::vector<float>> base_tensors_;
std::unordered_map<string, std::vector<float>> test_tensors_;
};
}
}
#endif
#include "tensorflow/lite/testing/kernel_test/diff_analyzer.h"
#include <algorithm>
#include <cmath>
#include <fstream>
#include <string>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
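// Largest absolute element-wise difference, normalized by the largest value
// in `base` (clamped to at least 1e-6).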
float CalculateNormalizedMaxDiff(const std::vector<float>& base,
const std::vector<float>& test) {
float diff = 0;
float base_max = 1e-6;
for (int i = 0; i < base.size(); i++) {
diff = std::max(diff, std::abs(base[i] - test[i]));
base_max = std::max(base_max, base[i]);
}
return diff / base_max;
}
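// Root-mean-square element-wise error, normalized the same way.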
float CalculateNormalizedL2Norm(const std::vector<float>& base,
const std::vector<float>& test) {
float l2_error = 0;
float base_max = 1e-6;
for (int i = 0; i < base.size(); i++) {
float diff = base[i] - test[i];
l2_error += diff * diff;
base_max = std::max(base_max, base[i]);
}
l2_error /= base.size();
return std::sqrt(l2_error) / base_max;
}
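// Parses lines of the form "<name>:<comma-separated floats>" into `tensors`.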
TfLiteStatus Populate(const string& filename,
std::unordered_map<string, std::vector<float>>* tensors) {
if (filename.empty()) {
fprintf(stderr, "Empty input file name.");
return kTfLiteError;
}
std::ifstream file(filename);
string content;
while (std::getline(file, content, '\n')) {
auto parts = Split<string>(content, ":");
if (parts.size() != 2) {
fprintf(stderr, "Expected <name>:<value>, got %s", content.c_str());
return kTfLiteError;
}
tensors->insert(std::make_pair(parts[0], Split<float>(parts[1], ",")));
}
file.close();
return kTfLiteOk;
}
}
TfLiteStatus DiffAnalyzer::ReadFiles(const string& base, const string& test) {
TF_LITE_ENSURE_STATUS(Populate(base, &base_tensors_));
TF_LITE_ENSURE_STATUS(Populate(test, &test_tensors_));
if (base_tensors_.size() != test_tensors_.size()) {
fprintf(stderr, "Golden and test tensor dimensions don't match.");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus DiffAnalyzer::WriteReport(const string& filename) {
if (filename.empty()) {
fprintf(stderr, "Empty output file name.");
return kTfLiteError;
}
std::ofstream output_file;
output_file.open(filename, std::fstream::out | std::fstream::trunc);
if (!output_file) {
fprintf(stderr, "Failed to open output file %s.", filename.c_str());
return kTfLiteError;
}
output_file << "Normalized L2 Error"
<< ","
<< "Normalized Max Diff"
<< "\n";
for (const auto& item : base_tensors_) {
const auto& name = item.first;
if (!test_tensors_.count(name)) {
fprintf(stderr, "Missing tensor %s in test tensors.", name.c_str());
continue;
}
float l2_error =
CalculateNormalizedL2Norm(base_tensors_[name], test_tensors_[name]);
float max_diff =
CalculateNormalizedMaxDiff(base_tensors_[name], test_tensors_[name]);
output_file << name << ":" << l2_error << "," << max_diff << "\n";
}
output_file.close();
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/testing/kernel_test/diff_analyzer.h"
#include <fstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/io/path.h"
namespace tflite {
namespace testing {
namespace {
TEST(DiffAnalyzerTest, ZeroDiff) {
DiffAnalyzer diff_analyzer;
string filename =
"tensorflow/lite/testing/kernel_test/testdata/test_input.csv";
ASSERT_EQ(diff_analyzer.ReadFiles(filename, filename), kTfLiteOk);
string output_file =
tensorflow::io::JoinPath(::testing::TempDir(), "diff_report.csv");
ASSERT_EQ(diff_analyzer.WriteReport(output_file), kTfLiteOk);
std::string content;
std::ifstream file(output_file);
std::getline(file, content);
std::getline(file, content);
ASSERT_EQ(content, "a:0,0");
}
}
}
} |
797 | cpp | tensorflow/tensorflow | toco_cmdline_flags | tensorflow/lite/toco/toco_cmdline_flags.cc | tensorflow/lite/toco/toco_cmdline_flags_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TOCO_CMDLINE_FLAGS_H_
#define TENSORFLOW_LITE_TOCO_TOCO_CMDLINE_FLAGS_H_
#include <string>
#include <vector>
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"
namespace toco {
bool ParseTocoFlagsFromCommandLineFlags(int* argc, char* argv[],
std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr);
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags);
}
#endif
#include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
bool ParseTocoFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr) {
using tensorflow::Flag;
ParsedTocoFlags& parsed_flags = *parsed_toco_flags_ptr;
std::vector<tensorflow::Flag> flags = {
Flag("input_file", parsed_flags.input_file.bind(),
parsed_flags.input_file.default_value(),
"Input file (model of any supported format). For Protobuf "
"formats, both text and binary are supported regardless of file "
"extension."),
Flag("savedmodel_directory", parsed_flags.savedmodel_directory.bind(),
parsed_flags.savedmodel_directory.default_value(),
"Deprecated. Full path to the directory containing the SavedModel."),
Flag("output_file", parsed_flags.output_file.bind(),
parsed_flags.output_file.default_value(),
"Output file. "
"For Protobuf formats, the binary format will be used."),
Flag("input_format", parsed_flags.input_format.bind(),
parsed_flags.input_format.default_value(),
"Input file format. One of: TENSORFLOW_GRAPHDEF, TFLITE."),
Flag("output_format", parsed_flags.output_format.bind(),
parsed_flags.output_format.default_value(),
"Output file format. "
"One of TENSORFLOW_GRAPHDEF, TFLITE, GRAPHVIZ_DOT."),
Flag("savedmodel_tagset", parsed_flags.savedmodel_tagset.bind(),
parsed_flags.savedmodel_tagset.default_value(),
"Deprecated. Comma-separated set of tags identifying the "
"MetaGraphDef within the SavedModel to analyze. All tags in the tag "
"set must be specified."),
Flag("default_ranges_min", parsed_flags.default_ranges_min.bind(),
parsed_flags.default_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_ranges_max", parsed_flags.default_ranges_max.bind(),
parsed_flags.default_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_int16_ranges_min",
parsed_flags.default_int16_ranges_min.bind(),
parsed_flags.default_int16_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("default_int16_ranges_max",
parsed_flags.default_int16_ranges_max.bind(),
parsed_flags.default_int16_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("inference_type", parsed_flags.inference_type.bind(),
parsed_flags.inference_type.default_value(),
"Target data type of arrays in the output file (for input_arrays, "
"this may be overridden by inference_input_type). "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("inference_input_type", parsed_flags.inference_input_type.bind(),
parsed_flags.inference_input_type.default_value(),
"Target data type of input arrays. "
"If not specified, inference_type is used. "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("input_type", parsed_flags.input_type.bind(),
parsed_flags.input_type.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type."),
Flag("input_types", parsed_flags.input_types.bind(),
parsed_flags.input_types.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type. Was meant to be a "
"comma-separated list, but this was deprecated before "
"multiple-input-types was ever properly supported."),
Flag("drop_fake_quant", parsed_flags.drop_fake_quant.bind(),
parsed_flags.drop_fake_quant.default_value(),
"Ignore and discard FakeQuant nodes. For instance, to "
"generate plain float code without fake-quantization from a "
"quantized graph."),
Flag(
"reorder_across_fake_quant",
parsed_flags.reorder_across_fake_quant.bind(),
parsed_flags.reorder_across_fake_quant.default_value(),
"Normally, FakeQuant nodes must be strict boundaries for graph "
"transformations, in order to ensure that quantized inference has "
"the exact same arithmetic behavior as quantized training --- which "
"is the whole point of quantized training and of FakeQuant nodes in "
"the first place. "
"However, that entails subtle requirements on where exactly "
"FakeQuant nodes must be placed in the graph. Some quantized graphs "
"have FakeQuant nodes at unexpected locations, that prevent graph "
"transformations that are necessary in order to generate inference "
"code for these graphs. Such graphs should be fixed, but as a "
"temporary work-around, setting this reorder_across_fake_quant flag "
"allows TOCO to perform necessary graph transformaitons on them, "
"at the cost of no longer faithfully matching inference and training "
"arithmetic."),
Flag("allow_custom_ops", parsed_flags.allow_custom_ops.bind(),
parsed_flags.allow_custom_ops.default_value(),
"If true, allow TOCO to create TF Lite Custom operators for all the "
"unsupported TensorFlow ops."),
Flag("custom_opdefs", parsed_flags.custom_opdefs.bind(),
parsed_flags.custom_opdefs.default_value(),
"List of strings representing custom ops OpDefs that are included "
"in the GraphDef."),
Flag("allow_dynamic_tensors", parsed_flags.allow_dynamic_tensors.bind(),
parsed_flags.allow_dynamic_tensors.default_value(),
"Boolean flag indicating whether the converter should allow models "
"with dynamic Tensor shape. When set to False, the converter will "
"generate runtime memory offsets for activation Tensors (with 128 "
"bits alignment) and error out on models with undetermined Tensor "
"shape. (Default: True)"),
Flag(
"drop_control_dependency",
parsed_flags.drop_control_dependency.bind(),
parsed_flags.drop_control_dependency.default_value(),
"If true, ignore control dependency requirements in input TensorFlow "
"GraphDef. Otherwise an error will be raised upon control dependency "
"inputs."),
Flag("debug_disable_recurrent_cell_fusion",
parsed_flags.debug_disable_recurrent_cell_fusion.bind(),
parsed_flags.debug_disable_recurrent_cell_fusion.default_value(),
"If true, disable fusion of known identifiable cell subgraphs into "
"cells. This includes, for example, specific forms of LSTM cell."),
Flag("propagate_fake_quant_num_bits",
parsed_flags.propagate_fake_quant_num_bits.bind(),
parsed_flags.propagate_fake_quant_num_bits.default_value(),
"If true, use FakeQuant* operator num_bits attributes to adjust "
"array data_types."),
Flag("allow_nudging_weights_to_use_fast_gemm_kernel",
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel.bind(),
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel
.default_value(),
"Some fast uint8 GEMM kernels require uint8 weights to avoid the "
"value 0. This flag allows nudging them to 1 to allow proceeding, "
"with moderate inaccuracy."),
Flag("dedupe_array_min_size_bytes",
parsed_flags.dedupe_array_min_size_bytes.bind(),
parsed_flags.dedupe_array_min_size_bytes.default_value(),
"Minimum size of constant arrays to deduplicate; arrays smaller "
"will not be deduplicated."),
Flag("split_tflite_lstm_inputs",
parsed_flags.split_tflite_lstm_inputs.bind(),
parsed_flags.split_tflite_lstm_inputs.default_value(),
"Split the LSTM inputs from 5 tensors to 18 tensors for TFLite. "
"Ignored if the output format is not TFLite."),
Flag("quantize_to_float16", parsed_flags.quantize_to_float16.bind(),
parsed_flags.quantize_to_float16.default_value(),
"Used in conjunction with post_training_quantize. Specifies that "
"the weights should be quantized to fp16 instead of the default "
"(int8)"),
Flag("quantize_weights", parsed_flags.quantize_weights.bind(),
parsed_flags.quantize_weights.default_value(),
"Deprecated. Please use --post_training_quantize instead."),
Flag("post_training_quantize", parsed_flags.post_training_quantize.bind(),
parsed_flags.post_training_quantize.default_value(),
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy)."),
Flag("enable_select_tf_ops", parsed_flags.enable_select_tf_ops.bind(),
parsed_flags.enable_select_tf_ops.default_value(), ""),
Flag("force_select_tf_ops", parsed_flags.force_select_tf_ops.bind(),
parsed_flags.force_select_tf_ops.default_value(), ""),
Flag("unfold_batchmatmul", parsed_flags.unfold_batchmatmul.bind(),
parsed_flags.unfold_batchmatmul.default_value(), ""),
Flag("accumulation_type", parsed_flags.accumulation_type.bind(),
parsed_flags.accumulation_type.default_value(),
"Accumulation type to use with quantize_to_float16"),
Flag("allow_bfloat16", parsed_flags.allow_bfloat16.bind(),
parsed_flags.allow_bfloat16.default_value(), "")};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
return tensorflow::Flags::Parse(argc, argv, flags);
}
}
namespace {
enum class FlagRequirement {
kNone,
kMustBeSpecified,
kMustNotBeSpecified,
kUseDefault,
};
template <typename T>
void EnforceFlagRequirement(const T& flag, const std::string& flag_name,
FlagRequirement requirement) {
if (requirement == FlagRequirement::kMustBeSpecified) {
QCHECK(flag.specified()) << "Missing required flag " << flag_name;
}
if (requirement == FlagRequirement::kMustNotBeSpecified) {
QCHECK(!flag.specified())
<< "Given other flags, this flag should not have been specified: "
<< flag_name;
}
}
template <typename T>
std::optional<T> GetFlagValue(const Arg<T>& flag, FlagRequirement requirement) {
if (flag.specified()) return flag.value();
if (requirement == FlagRequirement::kUseDefault) return flag.default_value();
return std::optional<T>();
}
}
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags) {
namespace port = toco::port;
port::CheckInitGoogleIsDone("InitGoogle is not done yet");
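// READ_TOCO_FLAG copies a parsed command-line value into the TocoFlags proto
// as-is; PARSE_TOCO_FLAG first parses the string value into the given enum.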
#define READ_TOCO_FLAG(name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
toco_flags->set_##name(flag_value.value()); \
} \
} while (false)
#define PARSE_TOCO_FLAG(Type, name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
Type x; \
QCHECK(Type##_Parse(flag_value.value(), &x)) \
<< "Unrecognized " << #Type << " value " \
<< parsed_toco_flags.name.value(); \
toco_flags->set_##name(x); \
} \
} while (false)
PARSE_TOCO_FLAG(FileFormat, input_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(FileFormat, output_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, inference_input_type, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(reorder_across_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_custom_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_control_dependency, FlagRequirement::kNone);
READ_TOCO_FLAG(debug_disable_recurrent_cell_fusion, FlagRequirement::kNone);
READ_TOCO_FLAG(propagate_fake_quant_num_bits, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_nudging_weights_to_use_fast_gemm_kernel,
FlagRequirement::kNone);
READ_TOCO_FLAG(dedupe_array_min_size_bytes, FlagRequirement::kNone);
READ_TOCO_FLAG(split_tflite_lstm_inputs, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_weights, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_to_float16, FlagRequirement::kNone);
READ_TOCO_FLAG(post_training_quantize, FlagRequirement::kNone);
READ_TOCO_FLAG(enable_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(force_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(unfold_batchmatmul, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, accumulation_type, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_bfloat16, FlagRequirement::kNone);
if (parsed_toco_flags.force_select_tf_ops.value() &&
!parsed_toco_flags.enable_select_tf_ops.value()) {
LOG(WARNING) << "--force_select_tf_ops should always be used with "
"--enable_select_tf_ops.";
}
if (parsed_toco_flags.input_type.specified()) {
LOG(WARNING)
<< "--input_type is deprecated. It was an ambiguous flag that set both "
"--input_data_types and --inference_input_type. If you are trying "
"to complement the input file with information about the type of "
"input arrays, use --input_data_type. If you are trying to control "
"the quantization/dequantization of real-numbers input arrays in "
"the output file, use --inference_input_type.";
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(parsed_toco_flags.input_type.value(),
&input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.input_types.specified()) {
LOG(WARNING)
<< "--input_types is deprecated. It was an ambiguous flag that set "
"both --input_data_types and --inference_input_type. If you are "
"trying to complement the input file with information about the "
"type of input arrays, use --input_data_type. If you are trying to "
"control the quantization/dequantization of real-numbers input "
"arrays in the output file, use --inference_input_type.";
std::vector<std::string> input_types =
absl::StrSplit(parsed_toco_flags.input_types.value(), ',');
QCHECK(!input_types.empty());
for (size_t i = 1; i < input_types.size(); i++) {
QCHECK_EQ(input_types[i], input_types[0]);
}
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(input_types[0], &input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.quantize_weights.value()) {
LOG(WARNING)
<< "--quantize_weights is deprecated. Falling back to "
"--post_training_quantize. Please switch --post_training_quantize.";
toco_flags->set_post_training_quantize(
parsed_toco_flags.quantize_weights.value());
}
if (parsed_toco_flags.quantize_weights.value()) {
if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
LOG(WARNING)
<< "--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.";
toco_flags->set_inference_type(IODataType::FLOAT);
}
}
#undef READ_TOCO_FLAG
#undef PARSE_TOCO_FLAG
}
} | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace toco {
namespace {
TEST(TocoCmdlineFlagsTest, DefaultValue) {
int argc = 1;
const char* args[] = {"toco", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), true);
}
TEST(TocoCmdlineFlagsTest, ParseFlags) {
int argc = 2;
const char* args[] = {"toco", "--allow_dynamic_tensors=false", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), false);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
798 | cpp | tensorflow/tensorflow | toco_port | tensorflow/lite/toco/toco_port.cc | tensorflow/lite/toco/toco_port_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TOCO_PORT_H_
#define TENSORFLOW_LITE_TOCO_TOCO_PORT_H_
#include <string>
#include "google/protobuf/text_format.h"
#include "tensorflow/lite/toco/format_port.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/platform.h"
#if defined(PLATFORM_GOOGLE)
#include "absl/strings/cord.h"
#endif
#ifdef PLATFORM_GOOGLE
#define TFLITE_PROTO_NS proto2
#else
#define TFLITE_PROTO_NS google::protobuf
#endif
#ifdef __ANDROID__
#include <sstream>
namespace std {
template <typename T>
std::string to_string(T value) {
  std::ostringstream os;
  os << value;
  return os.str();
}
#ifdef __ARM_ARCH_7A__
double round(double x);
#endif
}
#endif
namespace toco {
namespace port {
void InitGoogleWasDoneElsewhere();
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags);
void CheckInitGoogleIsDone(const char* message);
namespace file {
class Options {};
inline Options Defaults() {
Options o;
return o;
}
tensorflow::Status GetContents(const std::string& filename,
std::string* contents, const Options& options);
tensorflow::Status SetContents(const std::string& filename,
const std::string& contents,
const Options& options);
std::string JoinPath(const std::string& a, const std::string& b);
tensorflow::Status Writable(const std::string& filename);
tensorflow::Status Readable(const std::string& filename,
const Options& options);
tensorflow::Status Exists(const std::string& filename, const Options& options);
}
#if defined(PLATFORM_GOOGLE)
void CopyToBuffer(const ::absl::Cord& src, char* dest);
#endif
void CopyToBuffer(const std::string& src, char* dest);
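// Reverses the bit order of a 32-bit word (bit 0 swaps with bit 31, etc.).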
inline uint32 ReverseBits32(uint32 n) {
n = ((n >> 1) & 0x55555555) | ((n & 0x55555555) << 1);
n = ((n >> 2) & 0x33333333) | ((n & 0x33333333) << 2);
n = ((n >> 4) & 0x0F0F0F0F) | ((n & 0x0F0F0F0F) << 4);
return (((n & 0xFF) << 24) | ((n & 0xFF00) << 8) | ((n & 0xFF0000) >> 8) |
((n & 0xFF000000) >> 24));
}
}
inline bool ParseFromStringOverload(const std::string& in,
TFLITE_PROTO_NS::Message* proto) {
return TFLITE_PROTO_NS::TextFormat::ParseFromString(in, proto);
}
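// Tries binary protobuf parsing first and falls back to text format.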
template <typename Proto>
bool ParseFromStringEitherTextOrBinary(const std::string& input_file_contents,
Proto* proto) {
if (proto->ParseFromString(input_file_contents)) {
return true;
}
if (ParseFromStringOverload(input_file_contents, proto)) {
return true;
}
return false;
}
}
#endif
#include "tensorflow/lite/toco/toco_port.h"
#include <cstring>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/toco_types.h"
#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
namespace std {
double round(double x) { return ::round(x); }
}
#endif
namespace toco {
namespace port {
void CopyToBuffer(const std::string& src, char* dest) {
memcpy(dest, src.data(), src.size());
}
#ifdef PLATFORM_GOOGLE
void CopyToBuffer(const absl::Cord& src, char* dest) { src.CopyToArray(dest); }
#endif
}
}
#if defined(PLATFORM_GOOGLE) && !defined(__APPLE__) && \
!defined(__ANDROID__) && !defined(_WIN32)
#include "base/init_google.h"
#include "file/base/file.h"
#include "file/base/filesystem.h"
#include "file/base/helpers.h"
#include "file/base/options.h"
#include "file/base/path.h"
namespace toco {
namespace port {
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
::InitGoogle(usage, argc, argv, remove_flags);
}
void InitGoogleWasDoneElsewhere() {
}
void CheckInitGoogleIsDone(const char* message) {
::CheckInitGoogleIsDone(message);
}
namespace file {
tensorflow::Status ToStatus(const absl::Status& uts) {
if (!uts.ok()) {
return tensorflow::Status(absl::StatusCode(::util::RetrieveErrorCode(uts)),
uts.message());
}
return absl::OkStatus();
}
toco::port::file::Options ToOptions(const ::file::Options& options) {
CHECK_EQ(&options, &::file::Defaults());
return Options();
}
tensorflow::Status Writable(const std::string& filename) {
File* f = nullptr;
const auto status = ::file::Open(filename, "w", &f, ::file::Defaults());
if (f) {
QCHECK_OK(f->Close(::file::Defaults()));
}
return ToStatus(status);
}
tensorflow::Status Readable(const std::string& filename,
const file::Options& options) {
return ToStatus(::file::Readable(filename, ::file::Defaults()));
}
tensorflow::Status Exists(const std::string& filename,
const file::Options& options) {
auto status = ::file::Exists(filename, ::file::Defaults());
return ToStatus(status);
}
tensorflow::Status GetContents(const std::string& filename,
std::string* contents,
const file::Options& options) {
return ToStatus(::file::GetContents(filename, contents, ::file::Defaults()));
}
tensorflow::Status SetContents(const std::string& filename,
const std::string& contents,
const file::Options& options) {
return ToStatus(::file::SetContents(filename, contents, ::file::Defaults()));
}
std::string JoinPath(const std::string& a, const std::string& b) {
return ::file::JoinPath(a, b);
}
}
}
}
#else
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#if defined(_WIN32)
#include <io.h>
#else
#include <unistd.h>
#endif
#if defined(PLATFORM_GOOGLE)
#include "base/commandlineflags.h"
#endif
namespace toco {
namespace port {
#if defined(_WIN32)
#define close _close
#define open _open
#define read _read
constexpr int kFileCreateMode = _S_IREAD | _S_IWRITE;
constexpr int kFileReadFlags = _O_RDONLY | _O_BINARY;
constexpr int kFileWriteFlags = _O_WRONLY | _O_BINARY | _O_CREAT;
#else
constexpr int kFileCreateMode = 0664;
constexpr int kFileReadFlags = O_RDONLY;
constexpr int kFileWriteFlags = O_CREAT | O_WRONLY;
#endif
static bool port_initialized = false;
void InitGoogleWasDoneElsewhere() { port_initialized = true; }
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
if (!port_initialized) {
#if defined(PLATFORM_GOOGLE)
ParseCommandLineFlags(argc, argv, remove_flags);
#endif
port_initialized = true;
}
}
void CheckInitGoogleIsDone(const char* message) {
CHECK(port_initialized) << message;
}
namespace file {
tensorflow::Status Writable(const string& filename) {
FILE* f = fopen(filename.c_str(), "w");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not writable");
}
tensorflow::Status Readable(const string& filename,
const file::Options& options) {
FILE* f = fopen(filename.c_str(), "r");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not readable");
}
tensorflow::Status Exists(const string& filename,
const file::Options& options) {
struct stat statbuf;
int ret = stat(filename.c_str(), &statbuf);
if (ret == -1) {
return tensorflow::errors::NotFound("file doesn't exist");
}
return tensorflow::OkStatus();
}
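// Reads the entire file into *output in 64 KiB chunks.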
tensorflow::Status GetContents(const string& path, string* output,
const file::Options& options) {
output->clear();
int fd = open(path.c_str(), kFileReadFlags);
if (fd == -1) {
return tensorflow::errors::NotFound("can't open() for read");
}
const int kBufSize = 1 << 16;
char buffer[kBufSize];
while (true) {
int size = read(fd, buffer, kBufSize);
if (size == 0) {
close(fd);
return tensorflow::OkStatus();
} else if (size == -1) {
close(fd);
return tensorflow::errors::Internal("error during read()");
} else {
output->append(buffer, size);
}
}
CHECK(0);
return tensorflow::errors::Internal("internal error");
}
tensorflow::Status SetContents(const string& filename, const string& contents,
const file::Options& options) {
int fd = open(filename.c_str(), kFileWriteFlags, kFileCreateMode);
if (fd == -1) {
return tensorflow::errors::Internal("can't open() for write");
}
size_t i = 0;
while (i < contents.size()) {
size_t to_write = contents.size() - i;
ssize_t written = write(fd, &contents[i], to_write);
if (written == -1) {
close(fd);
return tensorflow::errors::Internal("write() error");
}
i += written;
}
close(fd);
return tensorflow::OkStatus();
}
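// Joins two path components, ensuring exactly one '/' separates them.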
string JoinPath(const string& base, const string& filename) {
if (base.empty()) return filename;
string base_fixed = base;
if (!base_fixed.empty() && base_fixed.back() == '/') base_fixed.pop_back();
string filename_fixed = filename;
if (!filename_fixed.empty() && filename_fixed.front() == '/')
filename_fixed.erase(0, 1);
return base_fixed + "/" + filename_fixed;
}
}
}
}
#endif | #include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_types.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace toco {
namespace port {
namespace {
#ifdef PLATFORM_GOOGLE
#define TFLITE_PREFIX "third_party/tensorflow/lite/"
#else
#define TFLITE_PREFIX "tensorflow/lite/"
#endif
TEST(TocoPortTest, Exists) {
EXPECT_TRUE(
file::Exists(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Exists("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, Readable) {
EXPECT_TRUE(
file::Readable(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Readable("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, JoinPath) {
EXPECT_EQ("part1/part2", file::JoinPath("part1", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1", "/part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "/part2"));
}
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
799 | cpp | tensorflow/tensorflow | toco_convert | tensorflow/lite/toco/toco_convert.cc | tensorflow/lite/toco/toco_convert_test.cc | #ifndef TENSORFLOW_LITE_TOCO_TOCO_CONVERT_H_
#define TENSORFLOW_LITE_TOCO_TOCO_CONVERT_H_
#include <string>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
namespace toco {
tensorflow::Status Convert(const std::string& graph_def_contents,
const TocoFlags& toco_flags,
const ModelFlags& model_flags,
std::string* output_file_contents,
int64_t* arithmetic_ops_count = nullptr);
tensorflow::Status Convert(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags);
}
#endif
#include <cstdio>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/toco_tooling.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
namespace toco {
namespace {
void CheckOutputFilePermissions(const Arg<std::string>& output_file) {
QCHECK(output_file.specified()) << "Missing required flag --output_file.\n";
QCHECK(port::file::Writable(output_file.value()).ok())
<< "Specified output_file is not writable: " << output_file.value()
<< ".\n";
}
void CheckFrozenModelPermissions(const Arg<std::string>& input_file) {
QCHECK(input_file.specified()) << "Missing required flag --input_file.\n";
QCHECK(port::file::Exists(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file does not exist: " << input_file.value() << ".\n";
QCHECK(port::file::Readable(input_file.value(), port::file::Defaults()).ok())
<< "Specified input_file exists, but is not readable: "
<< input_file.value() << ".\n";
}
void ReadInputData(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags,
std::string* graph_def_contents) {
port::CheckInitGoogleIsDone("InitGoogle is not done yet.\n");
QCHECK(!parsed_toco_flags.savedmodel_directory.specified())
<< "Use `tensorflow/lite/python/tflite_convert` script with "
<< "SavedModel directories.\n";
CheckFrozenModelPermissions(parsed_toco_flags.input_file);
CHECK(port::file::GetContents(parsed_toco_flags.input_file.value(),
graph_def_contents, port::file::Defaults())
.ok());
}
}
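// Imports the model, runs the graph transformations, and exports it in the
// requested output format. Optionally reports the arithmetic op count.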
tensorflow::Status Convert(const std::string& graph_def_contents,
const TocoFlags& toco_flags,
const ModelFlags& model_flags,
std::string* output_file_contents,
int64_t* arithmetic_ops_count = nullptr) {
std::unique_ptr<Model> model =
Import(toco_flags, model_flags, graph_def_contents);
TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));
TF_RETURN_IF_ERROR(Export(toco_flags, *model, toco_flags.allow_custom_ops(),
output_file_contents));
if (arithmetic_ops_count != nullptr) {
*arithmetic_ops_count = model->ArithmeticOpsCount();
}
return absl::OkStatus();
}
tensorflow::Status Convert(const ParsedTocoFlags& parsed_toco_flags,
const ParsedModelFlags& parsed_model_flags) {
ModelFlags model_flags;
ReadModelFlagsFromCommandLineFlags(parsed_model_flags, &model_flags);
TocoFlags toco_flags;
ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags, &toco_flags);
std::string graph_def_contents;
ReadInputData(parsed_toco_flags, parsed_model_flags, &graph_def_contents);
CheckOutputFilePermissions(parsed_toco_flags.output_file);
std::string output_file_contents;
TF_RETURN_IF_ERROR(Convert(graph_def_contents, toco_flags, model_flags,
&output_file_contents));
TF_RETURN_IF_ERROR(
port::file::SetContents(parsed_toco_flags.output_file.value(),
output_file_contents, port::file::Defaults()));
return tensorflow::Status();
}
} | #include "tensorflow/lite/toco/toco_convert.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
namespace {
TEST(TocoTest, MissingInputFile) {
ParsedTocoFlags toco_flags;
ParsedModelFlags model_flags;
EXPECT_DEATH(EXPECT_TRUE(Convert(toco_flags, model_flags).ok()),
"Missing required flag --input_file");
}
TEST(TocoTest, BadInputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled input_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, MissingOutputArrays) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"This model does not define output arrays, so a --output_arrays "
"flag must be given on the command-line");
}
TEST(TocoTest, BadOutputArray) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input;
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Specified output array .output1. is not produced by any op "
"in this graph. Is it a typo");
}
TEST(TocoTest, BadOutputFormat) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_DEATH(
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok()),
"Unhandled output_format='FILE_FORMAT_UNKNOWN'");
}
TEST(TocoTest, SimpleFloatModel) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TENSORFLOW_GRAPHDEF);
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "input2"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "output1"
input: "input1"
input: "input2"
op: "Sub"
attr { key: "T" value { type: DT_FLOAT } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
TEST(TocoTest, TransientStringTensors) {
TocoFlags toco_flags;
ModelFlags model_flags;
toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);
toco_flags.set_output_format(TFLITE);
toco::InputArray* input_1 = model_flags.add_input_arrays();
input_1->set_name("input1");
toco::InputArray* indices_1 = model_flags.add_input_arrays();
indices_1->set_name("indices1");
model_flags.add_output_arrays("output1");
std::string input = R"GraphDef(
node {
name: "input1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_STRING } }
attr { key: "shape" value { shape { dim { size:1 }}}}
}
node {
name: "indices1"
op: "Placeholder"
attr { key: "dtype" value { type: DT_INT64 } }
}
node {
name: "intermediate1"
op: "Gather"
input: "input1"
input: "indices1"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
node {
name: "output1"
op: "Gather"
input: "intermediate1"
input: "indices2"
attr { key: "Tparams" value { type: DT_STRING } }
attr { key: "Tindices" value { type: DT_INT64 } }
}
)GraphDef";
std::string output;
EXPECT_TRUE(Convert(input, toco_flags, model_flags, &output).ok());
EXPECT_TRUE(!output.empty());
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |