ID
stringlengths
36
36
Language
stringclasses
1 value
Repository Name
stringclasses
13 values
File Name
stringlengths
2
48
File Path in Repository
stringlengths
11
111
File Path for Unit Test
stringlengths
13
116
Code
stringlengths
0
278k
Unit Test - (Ground Truth)
stringlengths
78
663k
Code Url
stringlengths
91
198
Test Code Url
stringlengths
93
203
Commit Hash
stringclasses
13 values
e8aae33a-e103-4f39-a719-bdecff704539
cpp
tensorflow/tensorflow
scanner
third_party/xla/third_party/tsl/tsl/platform/scanner.cc
third_party/xla/third_party/tsl/tsl/platform/scanner_test.cc
#include "tsl/platform/scanner.h" namespace tsl { namespace strings { void Scanner::ScanUntilImpl(char end_ch, bool escaped) { for (;;) { if (cur_.empty()) { Error(); return; } const char ch = cur_[0]; if (ch == end_ch) { return; } cur_.remove_prefix(1); if (escaped && ch == '\\') { if (cur_.empty()) { Error(); return; } cur_.remove_prefix(1); } } } bool Scanner::GetResult(absl::string_view* remaining, absl::string_view* capture) { if (error_) { return false; } if (remaining != nullptr) { *remaining = cur_; } if (capture != nullptr) { const char* end = capture_end_ == nullptr ? cur_.data() : capture_end_; *capture = absl::string_view(capture_start_, end - capture_start_); } return true; } } }
#include "tsl/platform/scanner.h" #include "tsl/platform/test.h" namespace tsl { namespace strings { class ScannerTest : public ::testing::Test { protected: string ClassStr(Scanner::CharClass clz) { string s; for (int i = 0; i < 256; ++i) { char ch = i; if (Scanner::Matches(clz, ch)) { s += ch; } } return s; } }; TEST_F(ScannerTest, Any) { absl::string_view remaining, match; EXPECT_TRUE(Scanner(" horse0123") .Any(Scanner::SPACE) .Any(Scanner::DIGIT) .Any(Scanner::LETTER) .GetResult(&remaining, &match)); EXPECT_EQ(" horse", match); EXPECT_EQ("0123", remaining); EXPECT_TRUE(Scanner("") .Any(Scanner::SPACE) .Any(Scanner::DIGIT) .Any(Scanner::LETTER) .GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("", match); EXPECT_TRUE(Scanner("----") .Any(Scanner::SPACE) .Any(Scanner::DIGIT) .Any(Scanner::LETTER) .GetResult(&remaining, &match)); EXPECT_EQ("----", remaining); EXPECT_EQ("", match); } TEST_F(ScannerTest, AnySpace) { absl::string_view remaining, match; EXPECT_TRUE(Scanner(" a b ") .AnySpace() .One(Scanner::LETTER) .AnySpace() .GetResult(&remaining, &match)); EXPECT_EQ(" a ", match); EXPECT_EQ("b ", remaining); } TEST_F(ScannerTest, AnyEscapedNewline) { absl::string_view remaining, match; EXPECT_TRUE(Scanner("\\\n") .Any(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &match)); EXPECT_EQ("\\\n", remaining); EXPECT_EQ("", match); } TEST_F(ScannerTest, AnyEmptyString) { absl::string_view remaining, match; EXPECT_TRUE(Scanner("") .Any(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("", match); } TEST_F(ScannerTest, Eos) { EXPECT_FALSE(Scanner("a").Eos().GetResult()); EXPECT_TRUE(Scanner("").Eos().GetResult()); EXPECT_FALSE(Scanner("abc").OneLiteral("ab").Eos().GetResult()); EXPECT_TRUE(Scanner("abc").OneLiteral("abc").Eos().GetResult()); } TEST_F(ScannerTest, Many) { absl::string_view remaining, match; EXPECT_TRUE(Scanner("abc").Many(Scanner::LETTER).GetResult()); 
EXPECT_FALSE(Scanner("0").Many(Scanner::LETTER).GetResult()); EXPECT_FALSE(Scanner("").Many(Scanner::LETTER).GetResult()); EXPECT_TRUE( Scanner("abc ").Many(Scanner::LETTER).GetResult(&remaining, &match)); EXPECT_EQ(" ", remaining); EXPECT_EQ("abc", match); EXPECT_TRUE( Scanner("abc").Many(Scanner::LETTER).GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("abc", match); } TEST_F(ScannerTest, One) { absl::string_view remaining, match; EXPECT_TRUE(Scanner("abc").One(Scanner::LETTER).GetResult()); EXPECT_FALSE(Scanner("0").One(Scanner::LETTER).GetResult()); EXPECT_FALSE(Scanner("").One(Scanner::LETTER).GetResult()); EXPECT_TRUE(Scanner("abc") .One(Scanner::LETTER) .One(Scanner::LETTER) .GetResult(&remaining, &match)); EXPECT_EQ("c", remaining); EXPECT_EQ("ab", match); EXPECT_TRUE(Scanner("a").One(Scanner::LETTER).GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("a", match); } TEST_F(ScannerTest, OneLiteral) { EXPECT_FALSE(Scanner("abc").OneLiteral("abC").GetResult()); EXPECT_TRUE(Scanner("abc").OneLiteral("ab").OneLiteral("c").GetResult()); } TEST_F(ScannerTest, ScanUntil) { absl::string_view remaining, match; EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)") .OneLiteral("'") .ScanUntil('\'') .OneLiteral("'") .GetResult(&remaining, &match)); EXPECT_EQ(R"( \\'rest)", remaining); EXPECT_EQ(R"(' \1 \2 \3 \')", match); remaining = match = "unset"; EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \\rest)") .OneLiteral("'") .ScanUntil('\'') .GetResult(&remaining, &match)); EXPECT_EQ("unset", remaining); EXPECT_EQ("unset", match); remaining = match = ""; EXPECT_TRUE( Scanner(R"(123\456)").ScanUntil('\\').GetResult(&remaining, &match)); EXPECT_EQ(R"(\456)", remaining); EXPECT_EQ("123", match); } TEST_F(ScannerTest, ScanEscapedUntil) { absl::string_view remaining, match; EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)") .OneLiteral("'") .ScanEscapedUntil('\'') .OneLiteral("'") .GetResult(&remaining, &match)); EXPECT_EQ("rest", remaining); EXPECT_EQ(R"(' \1 
\2 \3 \' \\')", match); remaining = match = "unset"; EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \' \\rest)") .OneLiteral("'") .ScanEscapedUntil('\'') .GetResult(&remaining, &match)); EXPECT_EQ("unset", remaining); EXPECT_EQ("unset", match); } TEST_F(ScannerTest, ZeroOrOneLiteral) { absl::string_view remaining, match; EXPECT_TRUE( Scanner("abc").ZeroOrOneLiteral("abC").GetResult(&remaining, &match)); EXPECT_EQ("abc", remaining); EXPECT_EQ("", match); EXPECT_TRUE( Scanner("abcd").ZeroOrOneLiteral("ab").ZeroOrOneLiteral("c").GetResult( &remaining, &match)); EXPECT_EQ("d", remaining); EXPECT_EQ("abc", match); EXPECT_TRUE( Scanner("").ZeroOrOneLiteral("abc").GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("", match); } TEST_F(ScannerTest, CaptureAndGetResult) { absl::string_view remaining, match; Scanner scan(" first second"); EXPECT_TRUE(scan.Any(Scanner::SPACE) .RestartCapture() .One(Scanner::LETTER) .Any(Scanner::LETTER_DIGIT) .StopCapture() .Any(Scanner::SPACE) .GetResult(&remaining, &match)); EXPECT_EQ("second", remaining); EXPECT_EQ("first", match); EXPECT_TRUE(scan.GetResult()); remaining = ""; EXPECT_TRUE(scan.GetResult(&remaining)); EXPECT_EQ("second", remaining); remaining = ""; match = ""; EXPECT_TRUE(scan.GetResult(&remaining, &match)); EXPECT_EQ("second", remaining); EXPECT_EQ("first", match); scan.RestartCapture().One(Scanner::LETTER).One(Scanner::LETTER); remaining = ""; match = ""; EXPECT_TRUE(scan.GetResult(&remaining, &match)); EXPECT_EQ("cond", remaining); EXPECT_EQ("se", match); } TEST_F(ScannerTest, MultipleGetResultExtendsCapture) { absl::string_view remaining, match; Scanner scan("one2three"); EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match)); EXPECT_EQ("2three", remaining); EXPECT_EQ("one", match); EXPECT_TRUE(scan.Many(Scanner::DIGIT).GetResult(&remaining, &match)); EXPECT_EQ("three", remaining); EXPECT_EQ("one2", match); EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match)); EXPECT_EQ("", 
remaining); EXPECT_EQ("one2three", match); } TEST_F(ScannerTest, FailedMatchDoesntChangeResult) { Scanner scan("name"); absl::string_view remaining = "rem"; absl::string_view match = "match"; EXPECT_FALSE(scan.One(Scanner::SPACE).GetResult(&remaining, &match)); EXPECT_EQ("rem", remaining); EXPECT_EQ("match", match); } TEST_F(ScannerTest, DefaultCapturesAll) { Scanner scan("a b"); absl::string_view remaining = "rem"; absl::string_view match = "match"; EXPECT_TRUE(scan.Any(Scanner::LETTER) .AnySpace() .Any(Scanner::LETTER) .GetResult(&remaining, &match)); EXPECT_EQ("", remaining); EXPECT_EQ("a b", match); } TEST_F(ScannerTest, AllCharClasses) { EXPECT_EQ(256, ClassStr(Scanner::ALL).size()); EXPECT_EQ("0123456789", ClassStr(Scanner::DIGIT)); EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER)); EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT)); EXPECT_EQ( "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_" "abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DASH_UNDERSCORE)); EXPECT_EQ( "-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH)); EXPECT_EQ( "-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_" "abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)); EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DOT)); EXPECT_EQ("+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DOT_PLUS_MINUS)); EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_DOT_UNDERSCORE)); EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LETTER_DIGIT_UNDERSCORE)); EXPECT_EQ("abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LOWERLETTER)); EXPECT_EQ("0123456789abcdefghijklmnopqrstuvwxyz", 
ClassStr(Scanner::LOWERLETTER_DIGIT)); EXPECT_EQ("0123456789_abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LOWERLETTER_DIGIT_UNDERSCORE)); EXPECT_EQ("123456789", ClassStr(Scanner::NON_ZERO_DIGIT)); EXPECT_EQ("\t\n\v\f\r ", ClassStr(Scanner::SPACE)); EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZ", ClassStr(Scanner::UPPERLETTER)); EXPECT_EQ(">", ClassStr(Scanner::RANGLE)); } TEST_F(ScannerTest, Peek) { EXPECT_EQ('a', Scanner("abc").Peek()); EXPECT_EQ('a', Scanner("abc").Peek('b')); EXPECT_EQ('\0', Scanner("").Peek()); EXPECT_EQ('z', Scanner("").Peek('z')); EXPECT_EQ('A', Scanner("0123A").Any(Scanner::DIGIT).Peek()); EXPECT_EQ('\0', Scanner("0123A").Any(Scanner::LETTER_DIGIT).Peek()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/scanner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/scanner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
831874c0-a4e8-420e-a31a-f79ad8866b8a
cpp
tensorflow/tensorflow
stringprintf
third_party/xla/third_party/tsl/tsl/platform/stringprintf.cc
third_party/xla/third_party/tsl/tsl/platform/stringprintf_test.cc
#include "tsl/platform/stringprintf.h" #include <errno.h> #include <stdarg.h> #include <stdio.h> namespace tsl { namespace strings { void Appendv(string* dst, const char* format, va_list ap) { static const int kSpaceLength = 1024; char space[kSpaceLength]; va_list backup_ap; va_copy(backup_ap, ap); int result = vsnprintf(space, kSpaceLength, format, backup_ap); va_end(backup_ap); if (result < kSpaceLength) { if (result >= 0) { dst->append(space, result); return; } #ifdef _MSC_VER va_copy(backup_ap, ap); result = vsnprintf(nullptr, 0, format, backup_ap); va_end(backup_ap); #endif if (result < 0) { return; } } int length = result + 1; char* buf = new char[length]; va_copy(backup_ap, ap); result = vsnprintf(buf, length, format, backup_ap); va_end(backup_ap); if (result >= 0 && result < length) { dst->append(buf, result); } delete[] buf; } string Printf(const char* format, ...) { va_list ap; va_start(ap, format); string result; Appendv(&result, format, ap); va_end(ap); return result; } void Appendf(string* dst, const char* format, ...) { va_list ap; va_start(ap, format); Appendv(dst, format, ap); va_end(ap); } } }
#include "tsl/platform/stringprintf.h" #include <string> #include "tsl/platform/test.h" namespace tsl { namespace strings { namespace { TEST(PrintfTest, Empty) { EXPECT_EQ("", Printf("%s", string().c_str())); EXPECT_EQ("", Printf("%s", "")); } TEST(PrintfTest, Misc) { #if !defined(_MSC_VER) EXPECT_EQ("123hello w", Printf("%3$d%2$s %1$c", 'w', "hello", 123)); #endif } TEST(AppendfTest, Empty) { string value("Hello"); const char* empty = ""; Appendf(&value, "%s", empty); EXPECT_EQ("Hello", value); } TEST(AppendfTest, EmptyString) { string value("Hello"); Appendf(&value, "%s", ""); EXPECT_EQ("Hello", value); } TEST(AppendfTest, String) { string value("Hello"); Appendf(&value, " %s", "World"); EXPECT_EQ("Hello World", value); } TEST(AppendfTest, Int) { string value("Hello"); Appendf(&value, " %d", 123); EXPECT_EQ("Hello 123", value); } TEST(PrintfTest, Multibyte) { char* old_locale = setlocale(LC_CTYPE, nullptr); setlocale(LC_CTYPE, "en_US.utf8"); const char kInvalidCodePoint[] = "\375\067s"; string value = Printf("%.*s", 3, kInvalidCodePoint); EXPECT_TRUE(value.empty() || value == kInvalidCodePoint); int n = 2048; char* buf = new char[n + 1]; memset(buf, ' ', n - 3); memcpy(buf + n - 3, kInvalidCodePoint, 4); value = Printf("%.*s", n, buf); EXPECT_TRUE(value.empty() || value == buf); delete[] buf; setlocale(LC_CTYPE, old_locale); } TEST(PrintfTest, NoMultibyte) { char* old_locale = setlocale(LC_CTYPE, nullptr); setlocale(LC_CTYPE, "POSIX"); string value = Printf("%.*s", 3, "\375\067s"); setlocale(LC_CTYPE, old_locale); EXPECT_EQ("\375\067s", value); } TEST(PrintfTest, DontOverwriteErrno) { errno = ECHILD; string value = Printf("Hello, %s!", "World"); EXPECT_EQ(ECHILD, errno); } TEST(PrintfTest, LargeBuf) { int n = 2048; char* buf = new char[n + 1]; memset(buf, ' ', n); buf[n] = 0; string value = Printf("%s", buf); EXPECT_EQ(buf, value); delete[] buf; } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringprintf.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringprintf_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ee8e14f6-ef93-4bb8-9efa-87a35039a7f1
cpp
tensorflow/tensorflow
denormal
third_party/xla/third_party/tsl/tsl/platform/denormal.cc
third_party/xla/third_party/tsl/tsl/platform/denormal_test.cc
#include "tsl/platform/denormal.h" #include <cstdint> #include "tsl/platform/cpu_info.h" #include "tsl/platform/platform.h" #if !defined(__SSE3__) && !defined(__clang__) && \ (defined(__GNUC__) && (__GNUC__ < 4) || \ ((__GNUC__ == 4) && (__GNUC_MINOR__ < 9))) #define GCC_WITHOUT_INTRINSICS #endif #if defined(PLATFORM_IS_X86) && !defined(IS_MOBILE_PLATFORM) && \ !defined(GCC_WITHOUT_INTRINSICS) #define X86_DENORM_USE_INTRINSICS #endif #ifdef X86_DENORM_USE_INTRINSICS #include <pmmintrin.h> #endif #if defined(PLATFORM_IS_ARM) && defined(__ARM_FP) && (__ARM_FP > 0) #define ARM_DENORM_AVAILABLE #define ARM_FPCR_FZ (1 << 24) #endif namespace tsl { namespace port { bool DenormalState::operator==(const DenormalState& other) const { return flush_to_zero() == other.flush_to_zero() && denormals_are_zero() == other.denormals_are_zero(); } bool DenormalState::operator!=(const DenormalState& other) const { return !(this->operator==(other)); } #ifdef ARM_DENORM_AVAILABLE static inline void ArmSetFloatingPointControlRegister(uint32_t fpcr) { #ifdef PLATFORM_IS_ARM64 __asm__ __volatile__("msr fpcr, %[fpcr]" : : [fpcr] "r"(static_cast<uint64_t>(fpcr))); #else __asm__ __volatile__("vmsr fpscr, %[fpcr]" : : [fpcr] "r"(fpcr)); #endif } static inline uint32_t ArmGetFloatingPointControlRegister() { uint32_t fpcr; #ifdef PLATFORM_IS_ARM64 uint64_t fpcr64; __asm__ __volatile__("mrs %[fpcr], fpcr" : [fpcr] "=r"(fpcr64)); fpcr = static_cast<uint32_t>(fpcr64); #else __asm__ __volatile__("vmrs %[fpcr], fpscr" : [fpcr] "=r"(fpcr)); #endif return fpcr; } #endif bool SetDenormalState(const DenormalState& state) { #ifdef X86_DENORM_USE_INTRINSICS if (TestCPUFeature(SSE3)) { _MM_SET_FLUSH_ZERO_MODE(state.flush_to_zero() ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF); _MM_SET_DENORMALS_ZERO_MODE(state.denormals_are_zero() ? 
_MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF); return true; } #endif #ifdef ARM_DENORM_AVAILABLE if (state.flush_to_zero() == state.denormals_are_zero()) { uint32_t fpcr = ArmGetFloatingPointControlRegister(); if (state.flush_to_zero()) { fpcr |= ARM_FPCR_FZ; } else { fpcr &= ~ARM_FPCR_FZ; } ArmSetFloatingPointControlRegister(fpcr); return true; } #endif return false; } DenormalState GetDenormalState() { #ifdef X86_DENORM_USE_INTRINSICS if (TestCPUFeature(SSE3)) { bool flush_zero_mode = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_ON; bool denormals_zero_mode = _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON; return DenormalState(flush_zero_mode, denormals_zero_mode); } #endif #ifdef ARM_DENORM_AVAILABLE uint32_t fpcr = ArmGetFloatingPointControlRegister(); if ((fpcr & ARM_FPCR_FZ) != 0) { return DenormalState(true, true); } #endif return DenormalState(false, false); } ScopedRestoreFlushDenormalState::ScopedRestoreFlushDenormalState() : denormal_state_(GetDenormalState()) {} ScopedRestoreFlushDenormalState::~ScopedRestoreFlushDenormalState() { SetDenormalState(denormal_state_); } ScopedFlushDenormal::ScopedFlushDenormal() { SetDenormalState( DenormalState(true, true)); } ScopedDontFlushDenormal::ScopedDontFlushDenormal() { SetDenormalState( DenormalState(false, false)); } } }
#include "tsl/platform/denormal.h" #include <cstring> #include <limits> #include "tsl/platform/test.h" namespace tsl { namespace port { TEST(DenormalStateTest, ConstructorAndAccessorsWork) { const bool flush_to_zero[] = {true, true, false, false}; const bool denormals_are_zero[] = {true, false, true, false}; for (int i = 0; i < 4; ++i) { const DenormalState state = DenormalState(flush_to_zero[i], denormals_are_zero[i]); EXPECT_EQ(state.flush_to_zero(), flush_to_zero[i]); EXPECT_EQ(state.denormals_are_zero(), denormals_are_zero[i]); } } uint32_t bits(float x) { uint32_t out; memcpy(&out, &x, sizeof(float)); return out; } void CheckDenormalHandling(const DenormalState& state) { volatile float denormal_output = std::numeric_limits<float>::min(); denormal_output *= 0.25f; if (state.flush_to_zero()) { EXPECT_EQ(bits(denormal_output), 0x0); } else { EXPECT_NE(bits(denormal_output), 0x0); } volatile float normal_output = std::numeric_limits<float>::denorm_min(); normal_output *= std::numeric_limits<float>::max(); if (state.denormals_are_zero()) { EXPECT_EQ(bits(normal_output), 0x0); } else { EXPECT_NE(bits(normal_output), 0x0); } } TEST(DenormalTest, GetAndSetStateWorkWithCorrectFlushing) { const DenormalState states[] = { DenormalState(true, true), DenormalState(true, false), DenormalState(false, true), DenormalState(false, false)}; for (const DenormalState& state : states) { if (SetDenormalState(state)) { EXPECT_EQ(GetDenormalState(), state); CheckDenormalHandling(state); } } } TEST(ScopedRestoreFlushDenormalStateTest, RestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(flush_denormals); { ScopedRestoreFlushDenormalState restore_state; SetDenormalState(dont_flush_denormals); EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } 
EXPECT_EQ(GetDenormalState(), flush_denormals); SetDenormalState(dont_flush_denormals); { ScopedRestoreFlushDenormalState restore_state; SetDenormalState(flush_denormals); EXPECT_EQ(GetDenormalState(), flush_denormals); } EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } } TEST(ScopedFlushDenormalTest, SetsFlushingAndRestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(dont_flush_denormals); { ScopedFlushDenormal scoped_flush_denormal; EXPECT_EQ(GetDenormalState(), flush_denormals); } EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } } TEST(ScopedDontFlushDenormalTest, SetsNoFlushingAndRestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(flush_denormals); { ScopedDontFlushDenormal scoped_dont_flush_denormal; EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } EXPECT_EQ(GetDenormalState(), flush_denormals); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/denormal.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/denormal_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7f390e78-3920-4249-bb9f-33db271e81a9
cpp
tensorflow/tensorflow
coding
third_party/xla/third_party/tsl/tsl/platform/coding.cc
tensorflow/core/lib/core/coding_test.cc
#include "tsl/platform/coding.h" #include "tsl/platform/byte_order.h" #include "tsl/platform/stringpiece.h" #include "tsl/platform/tstring.h" #include "tsl/platform/types.h" namespace tsl { namespace core { void EncodeFixed16(char* buf, uint16 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; } } void EncodeFixed32(char* buf, uint32 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; } } void EncodeFixed64(char* buf, uint64 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; buf[4] = (value >> 32) & 0xff; buf[5] = (value >> 40) & 0xff; buf[6] = (value >> 48) & 0xff; buf[7] = (value >> 56) & 0xff; } } void PutFixed16(string* dst, uint16 value) { char buf[sizeof(value)]; EncodeFixed16(buf, value); dst->append(buf, sizeof(buf)); } void PutFixed32(string* dst, uint32 value) { char buf[sizeof(value)]; EncodeFixed32(buf, value); dst->append(buf, sizeof(buf)); } void PutFixed64(string* dst, uint64 value) { char buf[sizeof(value)]; EncodeFixed64(buf, value); dst->append(buf, sizeof(buf)); } char* EncodeVarint32(char* dst, uint32 v) { unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); static const int B = 128; if (v < (1 << 7)) { *(ptr++) = v; } else if (v < (1 << 14)) { *(ptr++) = v | B; *(ptr++) = v >> 7; } else if (v < (1 << 21)) { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = v >> 14; } else if (v < (1 << 28)) { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = (v >> 14) | B; *(ptr++) = v >> 21; } else { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = (v >> 14) | B; *(ptr++) = (v >> 21) | B; *(ptr++) = v >> 28; } return reinterpret_cast<char*>(ptr); } void PutVarint32(string* 
dst, uint32 v) { char buf[5]; char* ptr = EncodeVarint32(buf, v); dst->append(buf, ptr - buf); } void PutVarint32(tstring* dst, uint32 v) { char buf[5]; char* ptr = EncodeVarint32(buf, v); dst->append(buf, ptr - buf); } char* EncodeVarint64(char* dst, uint64 v) { static const int B = 128; unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); while (v >= B) { *(ptr++) = (v & (B - 1)) | B; v >>= 7; } *(ptr++) = static_cast<unsigned char>(v); return reinterpret_cast<char*>(ptr); } void PutVarint64(string* dst, uint64 v) { char buf[10]; char* ptr = EncodeVarint64(buf, v); dst->append(buf, ptr - buf); } void PutVarint64(tstring* dst, uint64 v) { char buf[10]; char* ptr = EncodeVarint64(buf, v); dst->append(buf, ptr - buf); } int VarintLength(uint64_t v) { int len = 1; while (v >= 128) { v >>= 7; len++; } return len; } const char* GetVarint32Ptr(const char* p, const char* limit, uint32* value) { if (p < limit) { uint32 result = *(reinterpret_cast<const unsigned char*>(p)); if ((result & 128) == 0) { *value = result; return p + 1; } } return GetVarint32PtrFallback(p, limit, value); } const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32* value) { uint32 result = 0; for (uint32 shift = 0; shift <= 28 && p < limit; shift += 7) { uint32 byte = *(reinterpret_cast<const unsigned char*>(p)); p++; if (byte & 128) { result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return nullptr; } bool GetVarint32(absl::string_view* input, uint32* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); if (q == nullptr) { return false; } else { *input = absl::string_view(q, limit - q); return true; } } const char* GetVarint64Ptr(const char* p, const char* limit, uint64* value) { uint64 result = 0; for (uint32 shift = 0; shift <= 63 && p < limit; shift += 7) { uint64 byte = *(reinterpret_cast<const unsigned 
char*>(p)); p++; if (byte & 128) { result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return nullptr; } bool GetVarint64(absl::string_view* input, uint64* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint64Ptr(p, limit, value); if (q == nullptr) { return false; } else { *input = absl::string_view(q, limit - q); return true; } } } }
#include "tensorflow/core/lib/core/coding.h" #include <vector> #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace core { TEST(Coding, Fixed16) { static const uint16 N = 50000; string s; for (uint16 v = 0; v < N; v++) { char buf[sizeof(uint16)]; EncodeFixed16(buf, v); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (uint16 v = 0; v < N; v++) { uint16 actual = DecodeFixed16(p); ASSERT_EQ(v, actual); p += sizeof(uint16); } } TEST(Coding, Fixed32) { static const uint32 N = 100000; string s; for (uint32 v = 0; v < N; v++) { char buf[sizeof(uint32)]; EncodeFixed32(buf, v); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (uint32 v = 0; v < N; v++) { uint32 actual = DecodeFixed32(p); ASSERT_EQ(v, actual); p += sizeof(uint32); } } TEST(Coding, Fixed64) { string s; for (int power = 0; power <= 63; power++) { uint64 v = static_cast<uint64>(1) << power; char buf[sizeof(uint64)]; EncodeFixed64(buf, v - 1); s.append(buf, sizeof(buf)); EncodeFixed64(buf, v + 0); s.append(buf, sizeof(buf)); EncodeFixed64(buf, v + 1); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (int power = 0; power <= 63; power++) { uint64 v = static_cast<uint64>(1) << power; uint64 actual; actual = DecodeFixed64(p); ASSERT_EQ(v - 1, actual); p += sizeof(uint64); actual = DecodeFixed64(p); ASSERT_EQ(v + 0, actual); p += sizeof(uint64); actual = DecodeFixed64(p); ASSERT_EQ(v + 1, actual); p += sizeof(uint64); } } TEST(Coding, EncodingOutput) { char dst[8]; EncodeFixed16(dst, 0x0201); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); EncodeFixed32(dst, 0x04030201); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); ASSERT_EQ(0x03, static_cast<int>(dst[2])); ASSERT_EQ(0x04, static_cast<int>(dst[3])); EncodeFixed64(dst, 0x0807060504030201ull); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); ASSERT_EQ(0x03, static_cast<int>(dst[2])); 
ASSERT_EQ(0x04, static_cast<int>(dst[3])); ASSERT_EQ(0x05, static_cast<int>(dst[4])); ASSERT_EQ(0x06, static_cast<int>(dst[5])); ASSERT_EQ(0x07, static_cast<int>(dst[6])); ASSERT_EQ(0x08, static_cast<int>(dst[7])); } TEST(Coding, Varint32) { string s; for (uint32 i = 0; i < (32 * 32); i++) { uint32 v = (i / 32) << (i % 32); PutVarint32(&s, v); } const char* p = s.data(); const char* limit = p + s.size(); for (uint32 i = 0; i < (32 * 32); i++) { uint32 expected = (i / 32) << (i % 32); uint32 actual; p = GetVarint32Ptr(p, limit, &actual); ASSERT_TRUE(p != nullptr); ASSERT_EQ(expected, actual); } ASSERT_EQ(p, s.data() + s.size()); } TEST(Coding, Varint64) { std::vector<uint64> values; values.push_back(0); values.push_back(100); values.push_back(~static_cast<uint64>(0)); values.push_back(~static_cast<uint64>(0) - 1); for (uint32 k = 0; k < 64; k++) { const uint64 power = 1ull << k; values.push_back(power); values.push_back(power - 1); values.push_back(power + 1); } string s; for (size_t i = 0; i < values.size(); i++) { PutVarint64(&s, values[i]); } const char* p = s.data(); const char* limit = p + s.size(); for (size_t i = 0; i < values.size(); i++) { ASSERT_TRUE(p < limit); uint64 actual; p = GetVarint64Ptr(p, limit, &actual); ASSERT_TRUE(p != nullptr); ASSERT_EQ(values[i], actual); } ASSERT_EQ(p, limit); } TEST(Coding, Varint32Overflow) { uint32 result; string input("\x81\x82\x83\x84\x85\x11"); ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) == nullptr); } TEST(Coding, Varint32Truncation) { uint32 large_value = (1u << 31) + 100; string s; PutVarint32(&s, large_value); uint32 result; for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr); } ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr); ASSERT_EQ(large_value, result); } TEST(Coding, Varint64Overflow) { uint64 result; string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11"); 
ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) == nullptr); } TEST(Coding, Varint64Truncation) { uint64 large_value = (1ull << 63) + 100ull; string s; PutVarint64(&s, large_value); uint64 result; for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr); } ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr); ASSERT_EQ(large_value, result); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/coding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/coding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3685acb6-6aa3-475c-a394-49e4602f0efb
cpp
tensorflow/tensorflow
base64
third_party/xla/third_party/tsl/tsl/platform/base64.cc
tensorflow/core/lib/strings/base64_test.cc
// Base64 encode/decode using the URL-and-filename-safe alphabet ('-' and '_'
// in place of '+' and '/').  Encoding is unpadded by default; decoding
// tolerates both padded and unpadded input.
#include "tsl/platform/base64.h"

#include <cstring>
#include <memory>

#include "absl/status/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"

namespace tsl {
namespace {

// Maps an ASCII code (0..127) to its 6-bit base64 value, or -1 for characters
// that are not in the URL-safe alphabet.
constexpr int8 kBase64Bytes[128] = {
    -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,
    -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,
    -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,
    -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   0x3E, -1,   -1,
    0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, -1,   -1,
    -1,   -1,   -1,   -1,   -1,   0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12,
    0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1,   -1,   -1,   -1,   0x3F,
    -1,   0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24,
    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
    0x31, 0x32, 0x33, -1,   -1,   -1,   -1,   -1};

constexpr char kBase64UrlSafeChars[65] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";

constexpr char kPadChar = '=';

// Converts one base64 character to its 6-bit value as a uint32.  Invalid
// characters (table value -1, or any byte with the high bit set) come back
// sign-extended, so they set bits above 0x00FFFFFF in the packed word and are
// caught by the single validity check in DecodeThreeChars.
inline uint32 Convert(char x) {
  const int8_t y = kBase64Bytes[x & 0x7F] | (x & 0x80);
  const int32_t z = static_cast<int32>(y);
  return static_cast<uint32>(z);
}

// Decodes one 4-character base64 group ("three chars" refers to the three
// output bytes) into result[0..2].  Fails if any input char was invalid.
absl::Status DecodeThreeChars(const char* codes, char* result) {
  const uint32 packed = (Convert(codes[0]) << 18) | (Convert(codes[1]) << 12) |
                        (Convert(codes[2]) << 6) | (Convert(codes[3]));
  // Any invalid input character leaves bits in the top byte (see Convert).
  if (TF_PREDICT_FALSE((packed & 0xFF000000) != 0)) {
    return errors::InvalidArgument("Invalid character found in base64.");
  }
  result[0] = static_cast<char>(packed >> 16);
  result[1] = static_cast<char>(packed >> 8);
  result[2] = static_cast<char>(packed);
  return absl::OkStatus();
}

}  // namespace

// Decodes `data` (padded or unpadded URL-safe base64) into *decoded.
// Returns InvalidArgument on bad characters or impossible lengths.
template <typename T>
absl::Status Base64Decode(absl::string_view data, T* decoded) {
  if (decoded == nullptr) {
    return errors::Internal("'decoded' cannot be nullptr.");
  }
  if (data.empty()) {
    decoded->clear();
    return absl::OkStatus();
  }
  // Upper bound: 3 output bytes per full 4-char group, plus slack for the
  // final partial group.
  const size_t max_decoded_size = 3 * (data.size() / 4) + 3;
  std::unique_ptr<char[]> buffer(new char[max_decoded_size]);
  char* current = buffer.get();
  if (current == nullptr) {
    return errors::ResourceExhausted(
        "Failed to allocate buffer for decoded string.");
  }
  const char* b64 = data.data();
  const char* end = data.data() + data.size();
  // Decode all but the last quantum; `> 4` (not >=) deliberately leaves the
  // final group, which may carry padding, for the special-case code below.
  while (end - b64 > 4) {
    TF_RETURN_IF_ERROR(DecodeThreeChars(b64, current));
    b64 += 4;
    current += 3;
  }
  // Strip trailing '=' padding (one or two chars) from the final group.
  if (end - b64 == 4) {
    if (b64[2] == kPadChar && b64[3] == kPadChar) {
      end -= 2;
    }
    if (b64[2] != kPadChar && b64[3] == kPadChar) {
      end -= 1;
    }
  }
  const int remain = static_cast<int>(end - b64);
  // A single leftover char encodes fewer than 8 bits -- never valid.
  if (TF_PREDICT_FALSE(remain == 1)) {
    return errors::InvalidArgument(
        "Base64 string length cannot be 1 modulo 4.");
  }
  // Pad the tail group with 'A' (value 0) so it decodes as a full quantum;
  // only the first `remain - 1` decoded bytes are meaningful.
  char tail[4] = {kBase64UrlSafeChars[0], kBase64UrlSafeChars[0],
                  kBase64UrlSafeChars[0], kBase64UrlSafeChars[0]};
  std::memcpy(tail, b64, remain * sizeof(*b64));
  TF_RETURN_IF_ERROR(DecodeThreeChars(tail, current));
  current += remain - 1;
  decoded->assign(buffer.get(), current - buffer.get());
  return absl::OkStatus();
}

// Unpadded-encoding convenience overload.
template <typename T>
absl::Status Base64Encode(absl::string_view source, T* encoded) {
  return Base64Encode(source, false, encoded);
}

// Encodes `source` as URL-safe base64, appending '=' padding only when
// `with_padding` is true.
template <typename T>
absl::Status Base64Encode(absl::string_view source, bool with_padding,
                          T* encoded) {
  const char* const base64_chars = kBase64UrlSafeChars;
  if (encoded == nullptr) {
    return errors::Internal("'encoded' cannot be nullptr.");
  }
  // Upper bound: 4 chars per 3-byte group plus a final (possibly padded)
  // group.
  const size_t max_encoded_size = 4 * (source.size() / 3) + 4;
  std::unique_ptr<char[]> buffer(new char[max_encoded_size]);
  char* current = buffer.get();
  if (current == nullptr) {
    return errors::ResourceExhausted(
        "Failed to allocate buffer for encoded string.");
  }
  const char* data = source.data();
  const char* const end = source.data() + source.size();
  // Full 3-byte groups -> 4 output characters each.
  while (end - data >= 3) {
    *current++ = base64_chars[(data[0] >> 2) & 0x3F];
    *current++ =
        base64_chars[((data[0] & 0x03) << 4) | ((data[1] >> 4) & 0x0F)];
    *current++ =
        base64_chars[((data[1] & 0x0F) << 2) | ((data[2] >> 6) & 0x03)];
    *current++ = base64_chars[data[2] & 0x3F];
    data += 3;
  }
  // Final partial group: 2 leftover bytes -> 3 chars (+1 pad), 1 leftover
  // byte -> 2 chars (+2 pads).
  if (end - data == 2) {
    *current++ = base64_chars[(data[0] >> 2) & 0x3F];
    *current++ =
        base64_chars[((data[0] & 0x03) << 4) | ((data[1] >> 4) & 0x0F)];
    *current++ = base64_chars[(data[1] & 0x0F) << 2];
    if (with_padding) {
      *current++ = kPadChar;
    }
  } else if (end - data == 1) {
    *current++ = base64_chars[(data[0] >> 2) & 0x3F];
    *current++ = base64_chars[(data[0] & 0x03) << 4];
    if (with_padding) {
      *current++ = kPadChar;
      *current++ = kPadChar;
    }
  }
  encoded->assign(buffer.get(), current - buffer.get());
  return absl::OkStatus();
}

// Explicit instantiations for the two string types used by callers.
template Status Base64Decode<std::string>(StringPiece data,
                                          std::string* decoded);
template Status Base64Encode<std::string>(StringPiece source,
                                          std::string* encoded);
template Status Base64Encode<std::string>(StringPiece source,
                                          bool with_padding,
                                          std::string* encoded);
template Status Base64Decode<tstring>(StringPiece data, tstring* decoded);
template Status Base64Encode<tstring>(StringPiece source, tstring* encoded);
template Status Base64Encode<tstring>(StringPiece source, bool with_padding,
                                      tstring* encoded);

}  // namespace tsl
// Smoke test for Base64Encode/Base64Decode: encodes a known message, checks
// the (unpadded, URL-safe) encoding, and verifies decode round-trips.
#include "tensorflow/core/lib/strings/base64.h"

#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

TEST(Base64, EncodeDecode) {
  const string original = "a simple test message!";
  tstring encoded;
  TF_EXPECT_OK(Base64Encode(original, &encoded));
  // No trailing '=' -- the default encoding is unpadded.
  EXPECT_EQ("YSBzaW1wbGUgdGVzdCBtZXNzYWdlIQ", encoded);
  tstring decoded;
  TF_EXPECT_OK(Base64Decode(encoded, &decoded));
  EXPECT_EQ(original, decoded);
}

}  // namespace tensorflow
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/base64.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/base64_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9be20d07-7939-4106-9f23-e493d6c48dba
cpp
tensorflow/tensorflow
retrying_utils
third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc
third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc
// Helpers for retrying flaky operations with exponential backoff and jitter.
#include "tsl/platform/retrying_utils.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"

namespace tsl {
namespace {

// Only transient failure codes are worth retrying; everything else is
// returned to the caller immediately.
bool IsRetriable(absl::StatusCode code) {
  switch (code) {
    case absl::StatusCode::kUnavailable:
    case absl::StatusCode::kDeadlineExceeded:
    case absl::StatusCode::kUnknown:
      return true;
    default:
      // OK also falls here, and it should never be retried.
      return false;
  }
}

// Uniform random double in [0, 1].
double GenerateUniformRandomNumber() {
  return random::New64() * (1.0 / std::numeric_limits<uint64_t>::max());
}

// Uniform random double in [a, b]; requires a <= b.
double GenerateUniformRandomNumberBetween(double a, double b) {
  if (a == b) return a;
  DCHECK_LT(a, b);
  return a + GenerateUniformRandomNumber() * (b - a);
}

}  // namespace

// Convenience overload that sleeps via the default Env.
absl::Status RetryingUtils::CallWithRetries(
    const std::function<absl::Status()>& f, const RetryConfig& config) {
  return CallWithRetries(
      f,
      [](int64_t micros) {
        return Env::Default()->SleepForMicroseconds(micros);
      },
      config);
}

// Calls f() until it succeeds, returns a non-retriable error, or
// config.max_retries attempts have failed (then returns kAborted).  Delay
// before attempt k is min(init_delay << k, max_delay) plus up to 1s of
// random jitter.
absl::Status RetryingUtils::CallWithRetries(
    const std::function<absl::Status()>& f,
    const std::function<void(int64_t)>& sleep_usec, const RetryConfig& config) {
  int retries = 0;
  while (true) {
    auto status = f();
    if (!IsRetriable(status.code())) {
      return status;
    }
    if (retries >= config.max_retries) {
      return absl::Status(
          absl::StatusCode::kAborted,
          strings::StrCat(
              "All ", config.max_retries,
              " retry attempts failed. The last failure: ", status.message()));
    }
    int64_t delay_micros = 0;
    if (config.init_delay_time_us > 0) {
      // Jitter de-synchronizes concurrent retriers.
      const int64_t random_micros = random::New64() % 1000000;
      delay_micros = std::min(config.init_delay_time_us << retries,
                              config.max_delay_time_us) +
                     random_micros;
    }
    VLOG(1) << "The operation failed and will be automatically retried in "
            << (delay_micros / 1000000.0) << " seconds (attempt "
            << (retries + 1) << " out of " << config.max_retries
            << "), caused by: " << status.ToString();
    sleep_usec(delay_micros);
    retries++;
  }
}

// Retries delete_func, treating NOT_FOUND on a *re*-attempt as success: the
// earlier (retriable) failure may have actually deleted the object.
absl::Status RetryingUtils::DeleteWithRetries(
    const std::function<absl::Status()>& delete_func,
    const RetryConfig& config) {
  bool is_retried = false;
  return RetryingUtils::CallWithRetries(
      [delete_func, &is_retried]() {
        const absl::Status status = delete_func();
        if (is_retried && status.code() == error::NOT_FOUND) {
          return absl::OkStatus();
        }
        is_retried = true;
        return status;
      },
      config);
}

// Backoff for attempt N: a fixed floor of 0.4*min_delay plus an
// exponentially growing term (base 1.3), randomly scaled to [0.6, 1.0] for
// jitter, capped so the total never exceeds max_delay and never drops below
// min_delay.
absl::Duration ComputeRetryBackoff(int current_retry_attempt,
                                   absl::Duration min_delay,
                                   absl::Duration max_delay) {
  DCHECK_GE(current_retry_attempt, 0);
  constexpr double kBackoffBase = 1.3;
  constexpr double kBackoffRandMult = 0.4;
  const absl::Duration first_term = min_delay * kBackoffRandMult;
  absl::Duration uncapped_second_term =
      min_delay * std::pow(kBackoffBase, current_retry_attempt);
  absl::Duration second_term =
      std::min(uncapped_second_term, max_delay - first_term);
  second_term *=
      GenerateUniformRandomNumberBetween(1.0 - kBackoffRandMult, 1.0);
  return std::max(first_term + second_term, min_delay);
}

}  // namespace tsl
// Tests for RetryingUtils::CallWithRetries / DeleteWithRetries and
// ComputeRetryBackoff.  Most tests drive the retry loop with a vector of
// canned statuses that the callback consumes one at a time.
#include "tsl/platform/retrying_utils.h"

#include <cmath>
#include <fstream>

#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"

namespace tsl {
namespace {

// Verifies the exponential-backoff schedule (0.5s, 1s, 2s, ... capped) by
// capturing the requested sleep durations.  Tolerance of 1.0s absorbs the
// up-to-1s random jitter added to each delay.
TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) {
  std::vector<double> requested_delays;
  std::function<void(int64_t)> sleep = [&requested_delays](int64_t delay) {
    requested_delays.emplace_back(delay / 1000000.0);
  };
  std::function<absl::Status()> f = []() {
    return errors::Unavailable("Failed.");
  };
  const auto& status = RetryingUtils::CallWithRetries(
      f, sleep, RetryConfig(500000 /* init_delay_time_us */));
  EXPECT_TRUE(errors::IsAborted(status));
  EXPECT_TRUE(absl::StrContains(
      status.message(),
      "All 10 retry attempts failed. The last failure: Failed."))
      << status;
  EXPECT_EQ(10, requested_delays.size());
  EXPECT_NEAR(0.5, requested_delays[0], 1.0);
  EXPECT_NEAR(1.0, requested_delays[1], 1.0);
  EXPECT_NEAR(2.0, requested_delays[2], 1.0);
  EXPECT_NEAR(4.0, requested_delays[3], 1.0);
  EXPECT_NEAR(8.0, requested_delays[4], 1.0);
  EXPECT_NEAR(16.0, requested_delays[5], 1.0);
  EXPECT_NEAR(32.0, requested_delays[6], 1.0);
  // Delay is capped at 32 seconds from here on.
  EXPECT_NEAR(32.0, requested_delays[7], 1.0);
  EXPECT_NEAR(32.0, requested_delays[8], 1.0);
  EXPECT_NEAR(32.0, requested_delays[9], 1.0);
}

// NOT_FOUND is not a retriable code: the second canned status must be
// returned as-is after one retry of the Unavailable failure.
TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) {
  std::vector<absl::Status> results(
      {errors::Unavailable("Failed."), errors::NotFound("Not found.")});
  std::function<absl::Status()> f = [&results]() {
    auto result = results[0];
    results.erase(results.begin());
    return result;
  };
  EXPECT_TRUE(errors::IsNotFound(RetryingUtils::CallWithRetries(
      f, RetryConfig(0 /* init_delay_time_us */))));
}

// An immediately-OK callback must never trigger a sleep.
TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) {
  std::vector<absl::Status> results({absl::OkStatus()});
  std::function<void(int64_t)> sleep = [](int64_t delay) {
    ADD_FAILURE() << "Unexpected call to sleep.";
  };
  std::function<absl::Status()> f = [&results]() {
    auto result = results[0];
    results.erase(results.begin());
    return result;
  };
  TF_EXPECT_OK(RetryingUtils::CallWithRetries(
      f, sleep, RetryConfig(1L /* init_delay_time_us */)));
}

// Two retriable failures followed by success -> overall OK.
TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) {
  std::vector<absl::Status> results({errors::Unavailable("Failed."),
                                     errors::Unavailable("Failed again."),
                                     absl::OkStatus()});
  std::function<absl::Status()> f = [&results]() {
    auto result = results[0];
    results.erase(results.begin());
    return result;
  };
  TF_EXPECT_OK(RetryingUtils::CallWithRetries(
      f, RetryConfig(0 /* init_delay_time_us */)));
}

TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) {
  std::vector<absl::Status> delete_results({absl::OkStatus()});
  const auto delete_func = [&delete_results]() {
    auto result = delete_results[0];
    delete_results.erase(delete_results.begin());
    return result;
  };
  TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}

TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) {
  std::vector<absl::Status> delete_results(
      {errors::Unavailable(""), absl::OkStatus()});
  const auto delete_func = [&delete_results]() {
    auto result = delete_results[0];
    delete_results.erase(delete_results.begin());
    return result;
  };
  TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}

// Non-retriable errors from the delete callback propagate unchanged.
TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) {
  std::vector<absl::Status> delete_results(
      {errors::Unavailable(""), errors::PermissionDenied("")});
  const auto delete_func = [&delete_results]() {
    auto result = delete_results[0];
    delete_results.erase(delete_results.begin());
    return result;
  };
  EXPECT_TRUE(errors::IsPermissionDenied(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */))));
}

// NOT_FOUND *after* a retried failure is treated as success (the earlier
// attempt may have performed the delete).
TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) {
  std::vector<absl::Status> delete_results(
      {errors::Unavailable(""), errors::NotFound("")});
  const auto delete_func = [&delete_results]() {
    auto result = delete_results[0];
    delete_results.erase(delete_results.begin());
    return result;
  };
  TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
      delete_func, RetryConfig(0 /* init_delay_time_us */)));
}

// NOT_FOUND on the *first* attempt is a genuine error and is returned as-is.
TEST(RetryingUtilsTest, DeleteWithRetries_FirstNotFoundReturnedAsIs) {
  std::vector<absl::Status> delete_results({errors::NotFound("")});
  const auto delete_func = [&delete_results]() {
    auto result = delete_results[0];
    delete_results.erase(delete_results.begin());
    return result;
  };
  EXPECT_EQ(error::NOT_FOUND,
            RetryingUtils::DeleteWithRetries(
                delete_func, RetryConfig(0 /* init_delay_time_us */))
                .code());
}

// Bounds check on the jittered exponential backoff formula
// (floor 0.4*min + [0.6,1.0] * min * 1.3^attempt).
TEST(RetryingUtilsTest, ComputeRetryBackoff) {
  for (int i = 0; i < 30; ++i) {
    EXPECT_LE(0.4 * absl::Milliseconds(1) +
                  0.6 * absl::Milliseconds(1) * std::pow(1.3, i),
              ComputeRetryBackoff(i));
    EXPECT_LE(
        ComputeRetryBackoff(i),
        0.4 * absl::Milliseconds(1) + absl::Milliseconds(1) * std::pow(1.3, i));
  }
}

// min_delay/max_delay clamp the computed backoff at both ends.
TEST(RetryingUtilsTest, ComputeRetryBackoff_MinMaxDelays) {
  for (int i = 0; i < 30; ++i) {
    EXPECT_EQ(ComputeRetryBackoff(i, /*min_delay=*/absl::Seconds(10)),
              absl::Seconds(10));
    EXPECT_EQ(ComputeRetryBackoff(i, /*min_delay=*/absl::Microseconds(1),
                                  /*max_delay=*/absl::Microseconds(1)),
              absl::Microseconds(1));
  }
}

}  // namespace
}  // namespace tsl
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f6f99474-0f88-43f5-9b8e-12bcde99b9a3
cpp
tensorflow/tensorflow
env
tensorflow/c/env.cc
tensorflow/c/env_test.cc
// C API wrappers around tensorflow::Env filesystem, time, threading, and
// dynamic-library facilities.  Each wrapper resets *status to TF_OK, performs
// the operation via Env::Default(), and copies the resulting Status back.
#include "tensorflow/c/env.h"

#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_file_statistics.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_statistics.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"

// Opaque iterator over a heap-owned list of strings; consumed via
// TF_StringStreamNext and released via TF_StringStreamDone.
struct TF_StringStream {
  std::vector<::tensorflow::string>* list;
  size_t position;
};

void TF_CreateDir(const char* dirname, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->CreateDir(dirname));
}

void TF_DeleteDir(const char* dirname, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->DeleteDir(dirname));
}

// Deletes dirname recursively; reports how many entries could not be removed.
void TF_DeleteRecursively(const char* dirname, int64_t* undeleted_file_count,
                          int64_t* undeleted_dir_count, TF_Status* status) {
  ::int64_t f, d;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->DeleteRecursively(dirname, &f, &d));
  *undeleted_file_count = f;
  *undeleted_dir_count = d;
}

// Fills *stats only when Stat succeeds; on error *stats is left untouched.
void TF_FileStat(const char* filename, TF_FileStatistics* stats,
                 TF_Status* status) {
  ::tensorflow::FileStatistics cc_stats;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Status s =
      ::tensorflow::Env::Default()->Stat(filename, &cc_stats);
  ::tensorflow::Set_TF_Status_from_Status(status, s);
  if (s.ok()) {
    stats->length = cc_stats.length;
    stats->mtime_nsec = cc_stats.mtime_nsec;
    stats->is_directory = cc_stats.is_directory;
  }
}

// On success, ownership of the WritableFile is transferred to the caller as
// an opaque handle; it must be released with TF_CloseWritableFile.
void TF_NewWritableFile(const char* filename, TF_WritableFileHandle** handle,
                        TF_Status* status) {
  std::unique_ptr<::tensorflow::WritableFile> f;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Status s =
      ::tensorflow::Env::Default()->NewWritableFile(filename, &f);
  ::tensorflow::Set_TF_Status_from_Status(status, s);
  if (s.ok()) {
    *handle = reinterpret_cast<TF_WritableFileHandle*>(f.release());
  }
}

// Closes and frees the file handle; the handle is invalid afterwards even if
// Close reported an error.
void TF_CloseWritableFile(TF_WritableFileHandle* handle, TF_Status* status) {
  auto* cc_file = reinterpret_cast<::tensorflow::WritableFile*>(handle);
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(status, cc_file->Close());
  delete cc_file;
}

void TF_SyncWritableFile(TF_WritableFileHandle* handle, TF_Status* status) {
  auto* cc_file = reinterpret_cast<::tensorflow::WritableFile*>(handle);
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(status, cc_file->Sync());
}

void TF_FlushWritableFile(TF_WritableFileHandle* handle, TF_Status* status) {
  auto* cc_file = reinterpret_cast<::tensorflow::WritableFile*>(handle);
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(status, cc_file->Flush());
}

void TF_AppendWritableFile(TF_WritableFileHandle* handle, const char* data,
                           size_t length, TF_Status* status) {
  auto* cc_file = reinterpret_cast<::tensorflow::WritableFile*>(handle);
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, cc_file->Append(::tensorflow::StringPiece{data, length}));
}

void TF_DeleteFile(const char* filename, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->DeleteFile(filename));
}

// Advances the stream.  *result points into the stream's own storage and is
// only valid until the next call or TF_StringStreamDone.
bool TF_StringStreamNext(TF_StringStream* list, const char** result) {
  if (list->position >= list->list->size()) {
    *result = nullptr;
    return false;
  }
  *result = list->list->at(list->position++).c_str();
  return true;
}

void TF_StringStreamDone(TF_StringStream* list) {
  delete list->list;
  delete list;
}

// Returns a stream over dirname's children.  Note: a stream is returned even
// when GetChildren failed; check *status.
TF_StringStream* TF_GetChildren(const char* dirname, TF_Status* status) {
  auto* children = new std::vector<::tensorflow::string>;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->GetChildren(dirname, children));
  auto* list = new TF_StringStream;
  list->list = children;
  list->position = 0;
  return list;
}

TF_StringStream* TF_GetLocalTempDirectories() {
  auto* tmpdirs = new std::vector<::tensorflow::string>;
  ::tensorflow::Env::Default()->GetLocalTempDirectories(tmpdirs);
  auto* list = new TF_StringStream;
  list->list = tmpdirs;
  list->position = 0;
  return list;
}

// Caller owns the returned C string (allocated with strdup) and must free()
// it.
char* TF_GetTempFileName(const char* extension) {
  return strdup(::tensorflow::io::GetTempFilename(extension).c_str());
}

TF_CAPI_EXPORT extern uint64_t TF_NowNanos(void) {
  return ::tensorflow::Env::Default()->NowNanos();
}

TF_CAPI_EXPORT extern uint64_t TF_NowMicros(void) {
  return ::tensorflow::Env::Default()->NowMicros();
}

TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void) {
  return ::tensorflow::Env::Default()->NowSeconds();
}

void TF_DefaultThreadOptions(TF_ThreadOptions* options) {
  options->stack_size = 0;
  options->guard_size = 0;
  options->numa_node = -1;
}

// Starts a thread running work_func(param).  The returned handle must be
// passed to TF_JoinThread; `param` must stay alive until the thread exits.
TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
                          const char* thread_name, void (*work_func)(void*),
                          void* param) {
  ::tensorflow::ThreadOptions cc_options;
  cc_options.stack_size = options->stack_size;
  cc_options.guard_size = options->guard_size;
  cc_options.numa_node = options->numa_node;
  return reinterpret_cast<TF_Thread*>(::tensorflow::Env::Default()->StartThread(
      cc_options, thread_name, [=]() { (*work_func)(param); }));
}

// Joins the thread (the tensorflow::Thread destructor blocks until the thread
// function returns) and frees the handle.
void TF_JoinThread(TF_Thread* thread) {
  delete reinterpret_cast<::tensorflow::Thread*>(thread);
}

void* TF_LoadSharedLibrary(const char* library_filename, TF_Status* status) {
  void* handle = nullptr;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->LoadDynamicLibrary(
                  library_filename, &handle));
  return handle;
}

void* TF_GetSymbolFromLibrary(void* handle, const char* symbol_name,
                              TF_Status* status) {
  void* symbol = nullptr;
  TF_SetStatus(status, TF_OK, "");
  ::tensorflow::Set_TF_Status_from_Status(
      status, ::tensorflow::Env::Default()->GetSymbolFromLibrary(
                  handle, symbol_name, &symbol));
  return symbol;
}
// Tests for the Env C API: directory/file handling, wall-clock functions,
// and thread creation/joining.
#include "tensorflow/c/env.h"

#include "tensorflow/c/tf_file_statistics.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"

#define ASSERT_TF_OK(x) ASSERT_EQ(TF_OK, TF_GetCode(x))

// Exercises the full file/dir lifecycle in the first available temp dir:
// create dir, write a file, list children, stat, then delete (checking that
// deleting a non-empty directory fails).
TEST(TestEnv, TestDirHandling) {
  TF_StringStream* tempdirs = TF_GetLocalTempDirectories();
  const char* tempdir;
  bool found = false;
  while (TF_StringStreamNext(tempdirs, &tempdir)) {
    found = true;
    TF_Status* s = TF_NewStatus();
    ::tensorflow::string dirpath =
        ::tensorflow::io::JoinPath(tempdir, "somedir");
    TF_CreateDir(dirpath.c_str(), s);
    ASSERT_TF_OK(s) << "TF_CreateDir failed for " << dirpath << ": "
                    << TF_Message(s);
    ::tensorflow::string filepath =
        ::tensorflow::io::JoinPath(dirpath, "somefile.txt");
    TF_WritableFileHandle* handle;
    TF_NewWritableFile(filepath.c_str(), &handle, s);
    ASSERT_TF_OK(s) << "NewWritableFile failed for " << filepath << ": "
                    << TF_Message(s);
    const char* data = "Hello, world!\n";
    TF_AppendWritableFile(handle, data, strlen(data), s);
    ASSERT_TF_OK(s) << "TF_AppendWritableFile failed to append data to file at "
                    << filepath << ": " << TF_Message(s);
    TF_CloseWritableFile(handle, s);
    ASSERT_TF_OK(s) << "TF_CloseWritableFile failed to close handle to "
                    << filepath << ": " << TF_Message(s);
    TF_StringStream* children = TF_GetChildren(dirpath.c_str(), s);
    ASSERT_TF_OK(s) << "TF_GetChildren failed for " << dirpath;
    const char* childpath;
    // Exactly one child is expected, then the stream must be exhausted.
    ASSERT_TRUE(TF_StringStreamNext(children, &childpath));
    ASSERT_EQ(::tensorflow::string(childpath), "somefile.txt");
    ASSERT_FALSE(TF_StringStreamNext(children, &childpath));
    ASSERT_EQ(childpath, nullptr);
    TF_StringStreamDone(children);
    TF_FileStatistics stats;
    TF_FileStat(filepath.c_str(), &stats, s);
    ASSERT_EQ(stats.length, strlen(data));
    ASSERT_FALSE(stats.is_directory);
    ASSERT_GT(stats.mtime_nsec, 0);
    // Deleting a non-empty directory must fail.
    TF_DeleteDir(dirpath.c_str(), s);
    ASSERT_NE(TF_OK, TF_GetCode(s))
        << "TF_DeleteDir unexpectedly succeeded with a non-empty directory "
        << dirpath;
    TF_DeleteFile(filepath.c_str(), s);
    ASSERT_TF_OK(s) << "TF_DeleteFile failed for " << filepath << ": "
                    << TF_Message(s);
    TF_DeleteDir(dirpath.c_str(), s);
    ASSERT_TF_OK(s) << "TF_DeleteDir failed for " << dirpath << ": "
                    << TF_Message(s);
    TF_DeleteStatus(s);
    // One temp dir is enough; stop after the first successful round-trip.
    break;
  }
  ASSERT_TRUE(found) << "expected at least one temp dir";
  TF_StringStreamDone(tempdirs);
}

// Sanity-checks the clock functions against 2000-01-01 (epoch 946684800).
TEST(TestEnv, TestTimeFunctions) {
  ASSERT_GE(TF_NowSeconds(), 946684800);
  ASSERT_GE(TF_NowMicros(), 946684800 * 1e6);
  ASSERT_GE(TF_NowNanos(), 946684800 * 1e9);
}

namespace {

struct SomeThreadData {
  ::tensorflow::mutex mu;
  bool did_work = false;  // set by the worker thread under mu
};

void SomeThreadFunc(void* data) {
  auto* real_data = static_cast<SomeThreadData*>(data);
  ::tensorflow::mutex_lock l(real_data->mu);
  real_data->did_work = true;
}

}  // namespace

// Starts one thread, joins it, and verifies its side effect is visible.
TEST(TestEnv, TestThreads) {
  TF_ThreadOptions options;
  TF_DefaultThreadOptions(&options);
  SomeThreadData data;
  TF_Thread* thread =
      TF_StartThread(&options, "SomeThreadName", &SomeThreadFunc, &data);
  TF_JoinThread(thread);
  ::tensorflow::mutex_lock l(data.mu);
  ASSERT_TRUE(data.did_work);
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/env.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/env_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2daed6fc-9200-44fa-9d04-a8857c2e409c
cpp
tensorflow/tensorflow
cpu_utils
third_party/xla/third_party/tsl/tsl/platform/profile_utils/cpu_utils.cc
third_party/xla/third_party/tsl/tsl/platform/profile_utils/cpu_utils_test.cc
// Utilities for reading the CPU cycle counter and estimating its frequency
// on Linux, Android, macOS, and Windows.
#include "tsl/platform/profile_utils/cpu_utils.h"

#include <fstream>
#include <limits>
#include <mutex>

#if defined(_WIN32)
#include <windows.h>
#endif

#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif

#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/android_armv7a_cpu_utils_helper.h"

namespace tsl {
namespace profile_utils {

// Out-of-line definition for the class-scope constexpr (pre-C++17 ODR).
constexpr int64_t CpuUtils::INVALID_FREQUENCY;

// Lazily created in GetCpuUtilsHelperSingletonInstance; never freed.
static ICpuUtilsHelper* cpu_utils_helper_instance_ = nullptr;

// NOTE(review): in this guard `&&` binds tighter than `||`, so plain
// __powerpc__ satisfies the condition regardless of byte order -- presumably
// the intended grouping; confirm against the matching guard in the header.
#if (defined(__powerpc__) || \
     defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
    (defined(__s390x__))
// These platforms expose the frequency as unsigned; computed once, cached.
uint64 CpuUtils::GetCycleCounterFrequency() {
  static const uint64 cpu_frequency = GetCycleCounterFrequencyImpl();
  return cpu_frequency;
}
#else
int64_t CpuUtils::GetCycleCounterFrequency() {
  static const int64_t cpu_frequency = GetCycleCounterFrequencyImpl();
  return cpu_frequency;
}
#endif

// Microseconds per clock tick, derived once from the cached frequency.
double CpuUtils::GetMicroSecPerClock() {
  static const double micro_sec_per_clock =
      (1000.0 * 1000.0) / static_cast<double>(GetCycleCounterFrequency());
  return micro_sec_per_clock;
}

void CpuUtils::ResetClockCycle() {
  GetCpuUtilsHelperSingletonInstance().ResetClockCycle();
}

void CpuUtils::EnableClockCycleProfiling() {
  GetCpuUtilsHelperSingletonInstance().EnableClockCycleProfiling();
}

void CpuUtils::DisableClockCycleProfiling() {
  GetCpuUtilsHelperSingletonInstance().DisableClockCycleProfiling();
}

std::chrono::duration<double> CpuUtils::ConvertClockCycleToTime(
    const int64_t clock_cycle) {
  return std::chrono::duration<double>(static_cast<double>(clock_cycle) /
                                       GetCycleCounterFrequency());
}

// Best-effort frequency detection; returns INVALID_FREQUENCY when no reliable
// source is available on the current platform.
int64_t CpuUtils::GetCycleCounterFrequencyImpl() {
#if defined(__ANDROID__)
  return GetCpuUtilsHelperSingletonInstance().CalculateCpuFrequency();
#elif defined(__linux__)
  // Parse /proc/cpuinfo; the field name differs by architecture.
  std::ifstream cpuinfo("/proc/cpuinfo");
  if (!cpuinfo) {
    LOG(WARNING) << "Failed to open /proc/cpuinfo";
    return INVALID_FREQUENCY;
  }
  string line;
  while (std::getline(cpuinfo, line)) {
    double cpu_freq = 0.0;
    int retval = 0;
    // bogomips is roughly 2x the clock on most platforms, hence the divisor;
    // on PPC the "clock" field is the real frequency so the factor is 1.
    double freq_factor = 2.0;
#if (defined(__powerpc__) || \
     defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
    retval = sscanf(line.c_str(), "clock              : %lfMHz", &cpu_freq);
    freq_factor = 1.0;
#elif defined(__s390x__)
    retval = sscanf(line.c_str(), "bogomips per cpu: %lf", &cpu_freq);
#elif defined(__aarch64__)
    retval = sscanf(line.c_str(), "BogoMIPS : %lf", &cpu_freq);
#else
    retval = sscanf(line.c_str(), "bogomips : %lf", &cpu_freq);
#endif
    if (retval > 0) {
      const double freq_ghz = cpu_freq / 1000.0 / freq_factor;
      // Reject implausibly small readings (< 10 MHz).
      if (retval != 1 || freq_ghz < 0.01) {
        LOG(WARNING) << "Failed to get CPU frequency: " << freq_ghz << " GHz";
        return INVALID_FREQUENCY;
      }
      const int64_t freq_n =
          static_cast<int64_t>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
      VLOG(1) << "CPU Frequency: " << freq_n << " Hz";
      return freq_n;
    }
  }
  LOG(WARNING)
      << "Failed to find bogomips or clock in /proc/cpuinfo; cannot determine "
         "CPU frequency";
  return INVALID_FREQUENCY;
#elif defined(__APPLE__)
  int64_t freq_hz = 0;
  size_t freq_hz_size = sizeof(freq_hz);
  int retval =
      sysctlbyname("hw.cpufrequency_max", &freq_hz, &freq_hz_size, NULL, 0);
  if (retval != 0 || freq_hz < 1e6) {
    // Apple Silicon does not expose hw.cpufrequency_max; fall back to the
    // timebase frequency scaled by the kernel clock rate.
    int64_t tbfrequency = 0;
    size_t tbfrequency_size = sizeof(tbfrequency);
    retval = sysctlbyname("hw.tbfrequency", &tbfrequency, &tbfrequency_size,
                          NULL, 0);
    if (retval == 0) {
      clockinfo clock_info;
      size_t clock_info_size = sizeof(clock_info);
      retval = sysctlbyname("kern.clockrate", &clock_info, &clock_info_size,
                            NULL, 0);
      if (retval == 0) {
        freq_hz = clock_info.hz * tbfrequency;
      }
    }
    if (retval != 0 || freq_hz < 1e6) {
      LOG(WARNING) << "Failed to get CPU frequency: " << freq_hz << " Hz";
      return INVALID_FREQUENCY;
    }
  }
  return freq_hz;
#elif defined(_WIN32)
  // NOTE(review): QueryPerformanceFrequency reports the QPC tick rate, which
  // is not necessarily the CPU core frequency -- presumably acceptable for
  // the profiling use here; confirm against callers.
  LARGE_INTEGER freq;
  QueryPerformanceFrequency(&freq);
  return freq.QuadPart;
#else
  return INVALID_FREQUENCY;
#endif
}

// Thread-safe lazy singleton; the helper is Android-specific where available,
// otherwise a no-op default implementation.
ICpuUtilsHelper& CpuUtils::GetCpuUtilsHelperSingletonInstance() {
  static absl::once_flag flag;
  absl::call_once(flag, []() {
    if (cpu_utils_helper_instance_ != nullptr) {
      LOG(FATAL) << "cpu_utils_helper_instance_ is already instantiated.";
    }
#if defined(__ANDROID__) && (__ANDROID_API__ >= 21) && \
    (defined(__ARM_ARCH_7A__) || defined(__aarch64__))
    cpu_utils_helper_instance_ = new AndroidArmV7ACpuUtilsHelper();
#else
    cpu_utils_helper_instance_ = new DefaultCpuUtilsHelper();
#endif
  });
  return *cpu_utils_helper_instance_;
}

}  // namespace profile_utils
}  // namespace tsl
// Tests for CpuUtils: cycle-counter monotonicity, frequency plausibility,
// and basic ClockCycleProfiler usage.
#include "tsl/platform/profile_utils/cpu_utils.h"

#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/clock_cycle_profiler.h"
#include "tsl/platform/test.h"

namespace tsl {
namespace profile_utils {

// Flip to true for verbose diagnostic logging while debugging locally.
static constexpr bool DBG = false;

class CpuUtilsTest : public ::testing::Test {
 protected:
  void SetUp() override { CpuUtils::EnableClockCycleProfiling(); }
};

TEST_F(CpuUtilsTest, SetUpTestCase) {}

TEST_F(CpuUtilsTest, TearDownTestCase) {}

// The cycle counter must be positive and non-decreasing across reads.
TEST_F(CpuUtilsTest, CheckGetCurrentClockCycle) {
  static constexpr int LOOP_COUNT = 10;
  const uint64 start_clock_count = CpuUtils::GetCurrentClockCycle();
  CHECK_GT(start_clock_count, 0);
  uint64 prev_clock_count = start_clock_count;
  for (int i = 0; i < LOOP_COUNT; ++i) {
    const uint64 clock_count = CpuUtils::GetCurrentClockCycle();
    CHECK_GE(clock_count, prev_clock_count);
    prev_clock_count = clock_count;
  }
  const uint64 end_clock_count = CpuUtils::GetCurrentClockCycle();
  if (DBG) {
    LOG(INFO) << "start clock = " << start_clock_count;
    LOG(INFO) << "end clock = " << end_clock_count;
    LOG(INFO) << "average clock = "
              << ((end_clock_count - start_clock_count) / LOOP_COUNT);
  }
}

// The reported frequency must be positive and not the INVALID sentinel.
// The type (and hence the comparison) mirrors the platform split in
// CpuUtils::GetCycleCounterFrequency.
TEST_F(CpuUtilsTest, CheckCycleCounterFrequency) {
#if (defined(__powerpc__) || \
     defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
    (defined(__s390x__))
  const uint64 cpu_frequency = CpuUtils::GetCycleCounterFrequency();
  CHECK_GT(cpu_frequency, 0);
  CHECK_NE(cpu_frequency, unsigned(CpuUtils::INVALID_FREQUENCY));
#else
  const int64_t cpu_frequency = CpuUtils::GetCycleCounterFrequency();
  CHECK_GT(cpu_frequency, 0);
  CHECK_NE(cpu_frequency, CpuUtils::INVALID_FREQUENCY);
#endif
  if (DBG) {
    LOG(INFO) << "Cpu frequency = " << cpu_frequency;
  }
}

TEST_F(CpuUtilsTest, CheckMicroSecPerClock) {
  const double micro_sec_per_clock = CpuUtils::GetMicroSecPerClock();
  CHECK_GT(micro_sec_per_clock, 0.0);
  if (DBG) {
    LOG(INFO) << "Micro sec per clock = " << micro_sec_per_clock;
  }
}

// Start/Stop pairs must be counted exactly once each.
TEST_F(CpuUtilsTest, SimpleUsageOfClockCycleProfiler) {
  static constexpr int LOOP_COUNT = 10;
  ClockCycleProfiler prof;
  for (int i = 0; i < LOOP_COUNT; ++i) {
    prof.Start();
    prof.Stop();
  }
  EXPECT_EQ(LOOP_COUNT, static_cast<int>(prof.GetCount() + 0.5));
  if (DBG) {
    prof.DumpStatistics("CpuUtilsTest");
  }
}

}  // namespace profile_utils
}  // namespace tsl
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/profile_utils/cpu_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/profile_utils/cpu_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ef064623-aa75-4f81-8a1a-dec1989a3a35
cpp
tensorflow/tensorflow
stacktrace_handler
third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace_handler.cc
third_party/xla/third_party/tsl/tsl/platform/stacktrace_handler_test.cc
#include "tsl/platform/stacktrace_handler.h" #include <windows.h> #include <dbghelp.h> #include <errno.h> #include <io.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <thread> #include "tsl/platform/mutex.h" #include "tsl/platform/stacktrace.h" #include "tsl/platform/types.h" namespace tsl { static mutex alarm_mu(LINKER_INITIALIZED); static bool alarm_activated = false; static void AlarmThreadBody() { alarm_mu.lock(); alarm_mu.Await(Condition(&alarm_activated)); alarm_mu.unlock(); Sleep(60000); signal(SIGABRT, SIG_DFL); abort(); } static bool PtrToString(uintptr_t ptr, char* buf, size_t size) { static constexpr char kHexCharacters[] = "0123456789abcdef"; static constexpr int kHexBase = 16; size_t num_hex_chars = 2 * sizeof(uintptr_t); if (size < (num_hex_chars + 4)) { return false; } buf[0] = '0'; buf[1] = 'x'; int start_index = 2; for (int i = num_hex_chars - 1 + start_index; i >= start_index; --i) { buf[i] = kHexCharacters[ptr % kHexBase]; ptr /= kHexBase; } int current_index = start_index + num_hex_chars; buf[current_index] = '\n'; buf[current_index + 1] = '\0'; return true; } static inline void SafePrintStackTracePointers() { static constexpr char begin_msg[] = "*** BEGIN STACK TRACE POINTERS ***\n"; (void)_write(_fileno(stderr), begin_msg, strlen(begin_msg)); static constexpr int kMaxStackFrames = 64; void* trace[kMaxStackFrames]; int num_frames = CaptureStackBackTrace(0, kMaxStackFrames, trace, NULL); for (int i = 0; i < num_frames; ++i) { char buffer[32] = "unsuccessful ptr conversion"; PtrToString(reinterpret_cast<uintptr_t>(trace[i]), buffer, sizeof(buffer)); (void)_write(_fileno(stderr), buffer, strlen(buffer)); } static constexpr char end_msg[] = "*** END STACK TRACE POINTERS ***\n\n"; (void)_write(_fileno(stderr), end_msg, strlen(end_msg)); } static void StacktraceHandler(int sig) { alarm_mu.lock(); alarm_activated = true; alarm_mu.unlock(); char buf[128]; snprintf(buf, sizeof(buf), "*** Received signal %d 
***\n", sig); (void)write(_fileno(stderr), buf, strlen(buf)); SafePrintStackTracePointers(); std::string stacktrace = CurrentStackTrace(); (void)write(_fileno(stderr), stacktrace.c_str(), stacktrace.length()); signal(SIGABRT, SIG_DFL); abort(); } namespace testing { void InstallStacktraceHandler() { int handled_signals[] = {SIGSEGV, SIGABRT, SIGILL, SIGFPE}; std::thread alarm_thread(AlarmThreadBody); alarm_thread.detach(); typedef void (*SignalHandlerPointer)(int); for (int sig : handled_signals) { SignalHandlerPointer previousHandler = signal(sig, StacktraceHandler); if (previousHandler == SIG_ERR) { char buf[128]; snprintf(buf, sizeof(buf), "tensorflow::InstallStackTraceHandler: Warning, can't install " "backtrace signal handler for signal %d, errno:%d \n", sig, errno); (void)write(_fileno(stderr), buf, strlen(buf)); } else if (previousHandler != SIG_DFL) { char buf[128]; snprintf(buf, sizeof(buf), "tensorflow::InstallStackTraceHandler: Warning, backtrace " "signal handler for signal %d overwrote previous handler.\n", sig); (void)write(_fileno(stderr), buf, strlen(buf)); } } } } }
#include <csignal> #include "tsl/platform/logging.h" #include "tsl/platform/test.h" namespace tsl { namespace { TEST(StacktraceHandlerTest, GeneratesStacktrace) { EXPECT_DEATH(raise(SIGABRT), "testing::internal::UnitTestImpl::RunAllTests"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace_handler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stacktrace_handler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1c0ad76b-c7c3-4d04-9d52-d416bdef5a84
cpp
tensorflow/tensorflow
mutex
third_party/xla/third_party/tsl/tsl/platform/default/mutex.cc
third_party/xla/third_party/tsl/tsl/platform/mutex_test.cc
#include "tsl/platform/mutex.h" #include <time.h> #include <cstdint> #include "nsync_cv.h" #include "nsync_mu.h" #include "nsync_mu_wait.h" #include "nsync_time.h" namespace tsl { static_assert(sizeof(nsync::nsync_mu) <= sizeof(internal::MuData), "tsl::internal::MuData needs to be bigger"); static inline nsync::nsync_mu *mu_cast(internal::MuData *mu) { return reinterpret_cast<nsync::nsync_mu *>(mu); } static inline const nsync::nsync_mu *mu_cast(const internal::MuData *mu) { return reinterpret_cast<const nsync::nsync_mu *>(mu); } mutex::mutex() { nsync::nsync_mu_init(mu_cast(&mu_)); } void mutex::lock() { nsync::nsync_mu_lock(mu_cast(&mu_)); } bool mutex::try_lock() { return nsync::nsync_mu_trylock(mu_cast(&mu_)) != 0; }; void mutex::unlock() { nsync::nsync_mu_unlock(mu_cast(&mu_)); } void mutex::assert_held() const TF_ASSERT_EXCLUSIVE_LOCK() { nsync::nsync_mu_assert_held(mu_cast(&mu_)); } void mutex::lock_shared() { nsync::nsync_mu_rlock(mu_cast(&mu_)); } bool mutex::try_lock_shared() { return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0; }; void mutex::unlock_shared() { nsync::nsync_mu_runlock(mu_cast(&mu_)); } void mutex::assert_held_shared() const TF_ASSERT_SHARED_LOCK() { nsync::nsync_mu_rassert_held(mu_cast(&mu_)); } static int EvaluateCondition(const void *vcond) { return static_cast<int>(static_cast<const Condition *>(vcond)->Eval()); } void mutex::Await(const Condition &cond) { nsync::nsync_mu_wait(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr); } bool mutex::AwaitWithDeadline(const Condition &cond, uint64_t abs_deadline_ns) { time_t seconds = abs_deadline_ns / (1000 * 1000 * 1000); nsync::nsync_time abs_time = nsync::nsync_time_s_ns( seconds, abs_deadline_ns - seconds * (1000 * 1000 * 1000)); return nsync::nsync_mu_wait_with_deadline(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr, abs_time, nullptr) == 0; } static_assert(sizeof(nsync::nsync_cv) <= sizeof(internal::CVData), "tsl::internal::CVData needs to be bigger"); static inline nsync::nsync_cv 
*cv_cast(internal::CVData *cv) { return reinterpret_cast<nsync::nsync_cv *>(cv); } condition_variable::condition_variable() { nsync::nsync_cv_init(cv_cast(&cv_)); } void condition_variable::wait(mutex_lock &lock) { nsync::nsync_cv_wait(cv_cast(&cv_), mu_cast(&lock.mutex()->mu_)); } void condition_variable::notify_one() { nsync::nsync_cv_signal(cv_cast(&cv_)); } void condition_variable::notify_all() { nsync::nsync_cv_broadcast(cv_cast(&cv_)); } namespace internal { std::cv_status wait_until_system_clock( CVData *cv_data, MuData *mu_data, const std::chrono::system_clock::time_point timeout_time) { int r = nsync::nsync_cv_wait_with_deadline(cv_cast(cv_data), mu_cast(mu_data), timeout_time, nullptr); return r ? std::cv_status::timeout : std::cv_status::no_timeout; } } }
#include "tsl/platform/mutex.h" #include "tsl/platform/test.h" #include "tsl/platform/threadpool.h" namespace tsl { namespace { class MutexTest : public ::testing::Test { protected: mutex_lock GetLock() TF_NO_THREAD_SAFETY_ANALYSIS { return mutex_lock{mu_}; } tf_shared_lock GetSharedLock() TF_NO_THREAD_SAFETY_ANALYSIS { return tf_shared_lock{mu_}; } bool test_try_lock() { bool test = mu_.try_lock(); if (test) mu_.unlock(); return test; } bool test_try_lock_shared() { bool test = mu_.try_lock_shared(); if (test) mu_.unlock_shared(); return test; } mutex mu_; }; TEST_F(MutexTest, MovableMutexLockTest) { EXPECT_TRUE(test_try_lock()); { mutex_lock lock = GetLock(); EXPECT_FALSE(test_try_lock()); EXPECT_FALSE(test_try_lock_shared()); } EXPECT_TRUE(test_try_lock()); } TEST_F(MutexTest, SharedMutexLockTest) { EXPECT_TRUE(test_try_lock()); { tf_shared_lock lock = GetSharedLock(); EXPECT_FALSE(test_try_lock()); EXPECT_TRUE(test_try_lock_shared()); } EXPECT_TRUE(test_try_lock()); } TEST(ConditionVariableTest, WaitWithPredicate) { constexpr int kNumThreads = 4; mutex mu; condition_variable cv; bool ready = false; int count = 0; tsl::thread::ThreadPool pool(Env::Default(), "condition_variable_test_wait_with_predicate", kNumThreads); for (int i = 0; i < kNumThreads; ++i) { pool.Schedule([&mu, &cv, &ready, &count]() { mutex_lock lock(mu); cv.wait(lock, [&ready] { return ready; }); ++count; cv.notify_one(); }); } { mutex_lock lock(mu); EXPECT_EQ(count, 0); } { mutex_lock lock(mu); ready = true; cv.notify_all(); } { mutex_lock lock(mu); cv.wait(lock, [&count, kNumThreads] { return count == kNumThreads; }); EXPECT_EQ(count, kNumThreads); } } TEST(ConditionVariableTest, WaitWithTruePredicateDoesntBlock) { mutex mu; mutex_lock lock(mu); condition_variable cv; cv.wait(lock, [] { return true; }); EXPECT_TRUE(static_cast<bool>(lock)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/mutex.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/mutex_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9660c787-ae03-4bd7-9c11-73f07e914f24
cpp
tensorflow/tensorflow
unbounded_work_queue
third_party/xla/third_party/tsl/tsl/platform/default/unbounded_work_queue.cc
third_party/xla/third_party/tsl/tsl/platform/unbounded_work_queue_test.cc
#include "tsl/platform/default/unbounded_work_queue.h" #include "absl/memory/memory.h" #include "tsl/platform/env.h" #include "tsl/platform/mutex.h" #include "tsl/platform/numa.h" namespace tsl { UnboundedWorkQueue::UnboundedWorkQueue(Env* env, const string& thread_name, const ThreadOptions& thread_options) : env_(env), thread_name_(thread_name), thread_options_(thread_options) {} UnboundedWorkQueue::~UnboundedWorkQueue() { { mutex_lock l(work_queue_mu_); cancelled_ = true; work_queue_cv_.notify_all(); if (!work_queue_.empty()) { LOG(ERROR) << "UnboundedWorkQueue named \"" << thread_name_ << "\" was " << "deleted with pending work in its queue. This may indicate " << "a potential use-after-free bug."; } } { mutex_lock l(thread_pool_mu_); thread_pool_.clear(); } } void UnboundedWorkQueue::Schedule(WorkFunction fn) { mutex_lock l(work_queue_mu_); work_queue_.push_back(std::move(fn)); work_queue_cv_.notify_one(); if (work_queue_.size() > num_idle_threads_) { Thread* new_thread = env_->StartThread({}, thread_name_, [this]() { PooledThreadFunc(); }); mutex_lock l(thread_pool_mu_); thread_pool_.emplace_back(new_thread); } } void UnboundedWorkQueue::PooledThreadFunc() { if (thread_options_.numa_node != tsl::port::kNUMANoAffinity) { tsl::port::NUMASetThreadNodeAffinity(thread_options_.numa_node); } while (true) { WorkFunction fn; { mutex_lock l(work_queue_mu_); ++num_idle_threads_; while (!cancelled_ && work_queue_.empty()) { work_queue_cv_.wait(l); } if (cancelled_) { return; } fn = std::move(work_queue_.front()); work_queue_.pop_front(); --num_idle_threads_; } fn(); } } }
#include "tsl/platform/unbounded_work_queue.h" #include "absl/memory/memory.h" #include "tsl/platform/random.h" #include "tsl/platform/blocking_counter.h" #include "tsl/platform/env.h" #include "tsl/platform/test.h" namespace tsl { namespace { class UnboundedWorkQueueTest : public ::testing::Test { protected: UnboundedWorkQueueTest() : work_queue_( absl::make_unique<UnboundedWorkQueue>(Env::Default(), "test")) {} ~UnboundedWorkQueueTest() override = default; void RunMultipleCopiesOfClosure(const int num_closures, std::function<void()> fn) { for (int i = 0; i < num_closures; ++i) { work_queue_->Schedule([this, fn]() { fn(); mutex_lock l(mu_); ++closure_count_; cond_var_.notify_all(); }); } } void BlockUntilClosuresDone(const int num_closures) { mutex_lock l(mu_); while (closure_count_ < num_closures) { cond_var_.wait(l); } } void ResetQueue() { work_queue_.reset(); } int NumClosuresExecuted() { mutex_lock l(mu_); return closure_count_; } private: mutex mu_; int closure_count_ TF_GUARDED_BY(mu_) = 0; condition_variable cond_var_; std::unique_ptr<UnboundedWorkQueue> work_queue_; }; TEST_F(UnboundedWorkQueueTest, SingleClosure) { constexpr int num_closures = 1; RunMultipleCopiesOfClosure(num_closures, []() {}); BlockUntilClosuresDone(num_closures); } TEST_F(UnboundedWorkQueueTest, MultipleClosures) { constexpr int num_closures = 10; RunMultipleCopiesOfClosure(num_closures, []() {}); BlockUntilClosuresDone(num_closures); } TEST_F(UnboundedWorkQueueTest, MultipleClosuresSleepingRandomly) { constexpr int num_closures = 1000; RunMultipleCopiesOfClosure(num_closures, []() { Env::Default()->SleepForMicroseconds(random::New64() % 10); }); BlockUntilClosuresDone(num_closures); } TEST_F(UnboundedWorkQueueTest, NestedClosures) { constexpr int num_closures = 10; RunMultipleCopiesOfClosure(num_closures, [=]() { RunMultipleCopiesOfClosure(num_closures, []() {}); }); BlockUntilClosuresDone(num_closures * num_closures + num_closures); } TEST_F(UnboundedWorkQueueTest, RacyDestructor) 
{ constexpr int num_closures = 100; RunMultipleCopiesOfClosure(num_closures, []() {}); ResetQueue(); EXPECT_LE(NumClosuresExecuted(), num_closures); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/unbounded_work_queue.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/unbounded_work_queue_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
062641ba-79ae-4aaf-bfae-9c7fea4d3d1c
cpp
tensorflow/tensorflow
subprocess
third_party/xla/third_party/tsl/tsl/platform/windows/subprocess.cc
third_party/xla/third_party/tsl/tsl/platform/subprocess_test.cc
#include "tsl/platform/subprocess.h" #include <io.h> #include <signal.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <windows.h> #include <vector> #include "tsl/platform/logging.h" #include "tsl/platform/strcat.h" #define PIPE_BUF_SIZE 4096 namespace tsl { namespace { static bool IsProcessFinished(HANDLE h) { DWORD process_return_code = STILL_ACTIVE; GetExitCodeProcess(h, &process_return_code); return process_return_code != STILL_ACTIVE; } struct ThreadData { string* iobuf; HANDLE iohandle; }; DWORD WINAPI InputThreadFunction(LPVOID param) { ThreadData* args = reinterpret_cast<ThreadData*>(param); string* input = args->iobuf; HANDLE in_handle = args->iohandle; size_t buffer_pointer = 0; size_t total_bytes_written = 0; bool ok = true; while (ok && total_bytes_written < input->size()) { DWORD bytes_written_this_time; ok = WriteFile(in_handle, input->data() + total_bytes_written, input->size() - total_bytes_written, &bytes_written_this_time, nullptr); total_bytes_written += bytes_written_this_time; } CloseHandle(in_handle); if (!ok) { return GetLastError(); } else { return 0; } } DWORD WINAPI OutputThreadFunction(LPVOID param) { ThreadData* args = reinterpret_cast<ThreadData*>(param); string* output = args->iobuf; HANDLE out_handle = args->iohandle; char buf[PIPE_BUF_SIZE]; DWORD bytes_read; bool wait_result = WaitForSingleObject(out_handle, INFINITE); if (wait_result != WAIT_OBJECT_0) { LOG(FATAL) << "WaitForSingleObject on child process output failed. 
" "Error code: " << wait_result; } while (ReadFile(out_handle, buf, sizeof(buf), &bytes_read, nullptr) && bytes_read > 0) { output->append(buf, bytes_read); } CloseHandle(out_handle); return 0; } } SubProcess::SubProcess(int nfds) : running_(false), win_pi_(nullptr), exec_path_(nullptr), exec_argv_(nullptr) { for (int i = 0; i < kNFds; i++) { action_[i] = ACTION_CLOSE; parent_pipe_[i] = nullptr; } } SubProcess::~SubProcess() { mutex_lock procLock(proc_mu_); mutex_lock dataLock(data_mu_); if (win_pi_) { auto* pi = reinterpret_cast<PROCESS_INFORMATION*>(win_pi_); CloseHandle(pi->hProcess); CloseHandle(pi->hThread); delete pi; win_pi_ = nullptr; } running_ = false; FreeArgs(); ClosePipes(); } void SubProcess::FreeArgs() { free(exec_path_); exec_path_ = nullptr; if (exec_argv_) { for (int i = 0; exec_argv_[i]; i++) { free(exec_argv_[i]); } delete[] exec_argv_; exec_argv_ = nullptr; } } void SubProcess::ClosePipes() { for (int i = 0; i < kNFds; i++) { if (parent_pipe_[i] != nullptr) { CloseHandle(parent_pipe_[i]); parent_pipe_[i] = nullptr; } } } void SubProcess::SetProgram(const string& file, const std::vector<string>& argv) { mutex_lock procLock(proc_mu_); mutex_lock dataLock(data_mu_); if (running_) { LOG(FATAL) << "SetProgram called after the process was started."; return; } FreeArgs(); exec_path_ = _strdup(file.c_str()); if (exec_path_ == nullptr) { LOG(FATAL) << "SetProgram failed to allocate file string."; return; } int argc = argv.size(); exec_argv_ = new char*[argc + 1]; for (int i = 0; i < argc; i++) { exec_argv_[i] = _strdup(argv[i].c_str()); if (exec_argv_[i] == nullptr) { LOG(FATAL) << "SetProgram failed to allocate command argument."; return; } } exec_argv_[argc] = nullptr; } void SubProcess::SetChannelAction(Channel chan, ChannelAction action) { mutex_lock procLock(proc_mu_); mutex_lock dataLock(data_mu_); if (running_) { LOG(FATAL) << "SetChannelAction called after the process was started."; } else if (!chan_valid(chan)) { LOG(FATAL) << "SetChannelAction 
called with invalid channel: " << chan; } else if ((action != ACTION_CLOSE) && (action != ACTION_PIPE) && (action != ACTION_DUPPARENT)) { LOG(FATAL) << "SetChannelAction called with invalid action: " << action; } else { action_[chan] = action; } } bool SubProcess::Start() { mutex_lock procLock(proc_mu_); mutex_lock dataLock(data_mu_); if (running_) { LOG(ERROR) << "Start called after the process was started."; return false; } if ((exec_path_ == nullptr) || (exec_argv_ == nullptr)) { LOG(ERROR) << "Start called without setting a program."; return false; } SECURITY_ATTRIBUTES attrs; attrs.nLength = sizeof(SECURITY_ATTRIBUTES); attrs.bInheritHandle = TRUE; attrs.lpSecurityDescriptor = nullptr; HANDLE child_pipe_[kNFds] TF_GUARDED_BY(data_mu_); for (int i = 0; i < kNFds; i++) { if (action_[i] == ACTION_PIPE) { if (!CreatePipe(i == CHAN_STDIN ? child_pipe_ + i : parent_pipe_ + i, i == CHAN_STDIN ? parent_pipe_ + i : child_pipe_ + i, &attrs, PIPE_BUF_SIZE)) { LOG(ERROR) << "Cannot create pipe. 
Error code: " << GetLastError(); ClosePipes(); return false; } if (!SetHandleInformation(parent_pipe_[i], HANDLE_FLAG_INHERIT, 0)) { LOG(ERROR) << "Cannot set pipe handle attributes."; ClosePipes(); return false; } } else if (action_[i] == ACTION_DUPPARENT) { if (i == CHAN_STDIN) { child_pipe_[i] = GetStdHandle(STD_INPUT_HANDLE); } else if (i == CHAN_STDOUT) { child_pipe_[i] = GetStdHandle(STD_OUTPUT_HANDLE); } else { child_pipe_[i] = GetStdHandle(STD_ERROR_HANDLE); } } else { parent_pipe_[i] = nullptr; child_pipe_[i] = nullptr; } } string command_line = strings::StrCat("\"", exec_path_, "\""); for (int i = 1; exec_argv_[i]; i++) { command_line.append(strings::StrCat(" \"", exec_argv_[i], "\"")); } STARTUPINFOA si; ZeroMemory(&si, sizeof(STARTUPINFO)); si.cb = sizeof(STARTUPINFO); si.dwFlags |= STARTF_USESHOWWINDOW; si.wShowWindow = SW_HIDE; si.dwFlags |= STARTF_USESTDHANDLES; if (child_pipe_[CHAN_STDIN]) { si.hStdInput = child_pipe_[CHAN_STDIN]; } if (child_pipe_[CHAN_STDOUT]) { si.hStdOutput = child_pipe_[CHAN_STDOUT]; } if (child_pipe_[CHAN_STDERR]) { si.hStdError = child_pipe_[CHAN_STDERR]; } win_pi_ = new PROCESS_INFORMATION; bool bSuccess = CreateProcessA(nullptr, const_cast<char*>(command_line.c_str()), nullptr, nullptr, TRUE, CREATE_NO_WINDOW, nullptr, nullptr, &si, reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)); if (bSuccess) { for (int i = 0; i < kNFds; i++) { if (child_pipe_[i] != nullptr) { CloseHandle(child_pipe_[i]); child_pipe_[i] = nullptr; } } running_ = true; return true; } else { LOG(ERROR) << "Call to CreateProcess failed. 
Error code: " << GetLastError() << ", command: '" << command_line << "'"; ClosePipes(); return false; } } bool SubProcess::Wait() { int status; return WaitInternal(&status); } bool SubProcess::WaitInternal(int* status) { proc_mu_.lock(); bool running = running_; PROCESS_INFORMATION pi_ = *reinterpret_cast<PROCESS_INFORMATION*>(win_pi_); proc_mu_.unlock(); bool ret = false; if (running && pi_.hProcess) { DWORD wait_status = WaitForSingleObject(pi_.hProcess, INFINITE); if (wait_status == WAIT_OBJECT_0) { DWORD process_exit_code = 0; if (GetExitCodeProcess(pi_.hProcess, &process_exit_code)) { *status = static_cast<int>(process_exit_code); } else { LOG(FATAL) << "Wait failed with code: " << GetLastError(); } } else { LOG(FATAL) << "WaitForSingleObject call on the process handle failed. " "Error code: " << wait_status; } } proc_mu_.lock(); if ((running_ == running) && (pi_.hProcess == reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess)) { running_ = false; CloseHandle(reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess); CloseHandle(reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hThread); reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess = nullptr; reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hThread = nullptr; } proc_mu_.unlock(); return *status == 0; } bool SubProcess::Kill(int unused_signal) { proc_mu_.lock(); bool running = running_; PROCESS_INFORMATION pi_ = *reinterpret_cast<PROCESS_INFORMATION*>(win_pi_); proc_mu_.unlock(); bool ret = false; if (running && pi_.hProcess) { ret = TerminateProcess(pi_.hProcess, 0); } return ret; } int SubProcess::Communicate(const string* stdin_input, string* stdout_output, string* stderr_output) { proc_mu_.lock(); bool running = running_; proc_mu_.unlock(); if (!running) { LOG(ERROR) << "Communicate called without a running process."; return 1; } HANDLE thread_handles[kNFds]; int thread_count = 0; ThreadData thread_params[kNFds]; data_mu_.lock(); proc_mu_.lock(); bool process_finished = IsProcessFinished( 
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess); proc_mu_.unlock(); if (!process_finished || (parent_pipe_[CHAN_STDOUT] != nullptr) || (parent_pipe_[CHAN_STDERR] != nullptr)) { if (parent_pipe_[CHAN_STDIN] != nullptr) { if (stdin_input) { thread_params[thread_count].iobuf = const_cast<string*>(stdin_input); thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDIN]; parent_pipe_[CHAN_STDIN] = nullptr; thread_handles[thread_count] = CreateThread(NULL, 0, InputThreadFunction, thread_params + thread_count, 0, NULL); thread_count++; } } else { CloseHandle(parent_pipe_[CHAN_STDIN]); parent_pipe_[CHAN_STDIN] = NULL; } if (parent_pipe_[CHAN_STDOUT] != nullptr) { if (stdout_output != nullptr) { thread_params[thread_count].iobuf = stdout_output; thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDOUT]; parent_pipe_[CHAN_STDOUT] = NULL; thread_handles[thread_count] = CreateThread(NULL, 0, OutputThreadFunction, thread_params + thread_count, 0, NULL); thread_count++; } else { CloseHandle(parent_pipe_[CHAN_STDOUT]); parent_pipe_[CHAN_STDOUT] = nullptr; } } if (parent_pipe_[CHAN_STDERR] != nullptr) { if (stderr_output != nullptr) { thread_params[thread_count].iobuf = stderr_output; thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDERR]; parent_pipe_[CHAN_STDERR] = NULL; thread_handles[thread_count] = CreateThread(NULL, 0, OutputThreadFunction, thread_params + thread_count, 0, NULL); thread_count++; } else { CloseHandle(parent_pipe_[CHAN_STDERR]); parent_pipe_[CHAN_STDERR] = nullptr; } } } if (thread_count > 0) { DWORD wait_result = WaitForMultipleObjects(thread_count, thread_handles, true, INFINITE); if (wait_result != WAIT_OBJECT_0) { LOG(ERROR) << "Waiting on the io threads failed! 
result: " << wait_result << std::endl; data_mu_.unlock(); return -1; } for (int i = 0; i < thread_count; i++) { DWORD exit_code; if (GetExitCodeThread(thread_handles[i], &exit_code)) { if (exit_code) { LOG(ERROR) << "One of the IO threads failed with code: " << exit_code; } } else { LOG(ERROR) << "Error checking io thread exit statuses. Error Code: " << GetLastError(); } } } data_mu_.unlock(); int status; return WaitInternal(&status) ? status : -1; } std::unique_ptr<SubProcess> CreateSubProcess(const std::vector<string>& argv) { std::unique_ptr<SubProcess> proc(new SubProcess()); proc->SetProgram(argv[0], argv); proc->SetChannelAction(CHAN_STDERR, ACTION_DUPPARENT); proc->SetChannelAction(CHAN_STDOUT, ACTION_DUPPARENT); return proc; } }
#include "tsl/platform/subprocess.h" #include <stdlib.h> #include <algorithm> #include <string> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/path.h" #include "tsl/platform/strcat.h" #include "tsl/platform/test.h" #ifdef PLATFORM_WINDOWS #define WIFEXITED(code) ((code) != 3) #define WEXITSTATUS(code) (code) #define SIGKILL 9 #else #include <sys/wait.h> #endif namespace tsl { namespace { string EchoProgram() { std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_echo"); return tsl::io::AppendDotExeIfWindows(path); } string EchoArgv1Program() { std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_echo_argv_1"); return tsl::io::AppendDotExeIfWindows(path); } string NoopProgram() { std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_noop"); return tsl::io::AppendDotExeIfWindows(path); } string StdErrProgram() { std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_stderr"); return tsl::io::AppendDotExeIfWindows(path); } class SubProcessTest : public ::testing::Test {}; TEST_F(SubProcessTest, NoOutputNoComm) { tsl::SubProcess proc; proc.SetProgram(NoopProgram().c_str(), {NoopProgram()}); EXPECT_TRUE(proc.Start()); EXPECT_TRUE(proc.Wait()); } TEST_F(SubProcessTest, NoOutput) { tsl::SubProcess proc; proc.SetProgram(NoopProgram().c_str(), {NoopProgram()}); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string out, err; int status = proc.Communicate(nullptr, &out, &err); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); EXPECT_EQ("", out); EXPECT_EQ("", err); } TEST_F(SubProcessTest, Stdout) { tsl::SubProcess proc; const char test_string[] = "hello_world"; proc.SetProgram(EchoArgv1Program().c_str(), {EchoArgv1Program(), test_string}); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE); 
EXPECT_TRUE(proc.Start()); string out, err; int status = proc.Communicate(nullptr, &out, &err); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); EXPECT_EQ(test_string, out); EXPECT_EQ("", err); } TEST_F(SubProcessTest, StdoutIgnored) { tsl::SubProcess proc; const char test_string[] = "hello_world"; proc.SetProgram(EchoArgv1Program().c_str(), {EchoArgv1Program(), test_string}); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE); EXPECT_TRUE(proc.Start()); int status = proc.Communicate(nullptr, nullptr, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); } TEST_F(SubProcessTest, Stderr) { tsl::SubProcess proc; const char test_string[] = "muh_failure!"; proc.SetProgram(StdErrProgram().c_str(), {StdErrProgram(), test_string}); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string out, err; int status = proc.Communicate(nullptr, &out, &err); EXPECT_TRUE(WIFEXITED(status)); EXPECT_NE(0, WEXITSTATUS(status)); EXPECT_EQ("", out); EXPECT_EQ(test_string, err); } TEST_F(SubProcessTest, StderrIgnored) { tsl::SubProcess proc; const char test_string[] = "muh_failure!"; proc.SetProgram(StdErrProgram().c_str(), {StdErrProgram(), test_string}); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE); EXPECT_TRUE(proc.Start()); int status = proc.Communicate(nullptr, nullptr, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_NE(0, WEXITSTATUS(status)); } TEST_F(SubProcessTest, Stdin) { tsl::SubProcess proc; proc.SetProgram(EchoProgram().c_str(), {EchoProgram()}); proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string in = "foobar\nbarfoo\nhaha\n"; int status = proc.Communicate(&in, nullptr, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); } TEST_F(SubProcessTest, StdinStdout) { tsl::SubProcess proc; 
proc.SetProgram(EchoProgram().c_str(), {EchoProgram()}); proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string in = "foobar\nbarfoo\nhaha\n"; string out; int status = proc.Communicate(&in, &out, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); out.erase(std::remove(out.begin(), out.end(), '\r'), out.end()); EXPECT_EQ(in, out); } TEST_F(SubProcessTest, StdinChildExit) { tsl::SubProcess proc; proc.SetProgram(NoopProgram().c_str(), {NoopProgram()}); proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string in; in.reserve(1000000); for (int i = 0; i < 100000; i++) { in += "hello xyz\n"; } int status = proc.Communicate(&in, nullptr, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); } TEST_F(SubProcessTest, StdinStdoutOverlap) { tsl::SubProcess proc; proc.SetProgram(EchoProgram().c_str(), {EchoProgram()}); proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); EXPECT_TRUE(proc.Start()); string in; in.reserve(1000000); for (int i = 0; i < 100000; i++) { in += "hello xyz\n"; } string out; int status = proc.Communicate(&in, &out, nullptr); EXPECT_TRUE(WIFEXITED(status)); EXPECT_EQ(0, WEXITSTATUS(status)); out.erase(std::remove(out.begin(), out.end(), '\r'), out.end()); EXPECT_EQ(in, out); } TEST_F(SubProcessTest, KillProc) { tsl::SubProcess proc; proc.SetProgram(EchoProgram().c_str(), {EchoProgram()}); proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE); proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE); EXPECT_TRUE(proc.Start()); EXPECT_TRUE(proc.Kill(SIGKILL)); EXPECT_TRUE(proc.Wait()); EXPECT_FALSE(proc.Kill(SIGKILL)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/subprocess.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/subprocess_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7f15f528-9a85-447a-add7-29c1b1ef15f7
cpp
tensorflow/tensorflow
net
third_party/xla/third_party/tsl/tsl/platform/windows/net.cc
third_party/xla/third_party/tsl/tsl/platform/net_test.cc
#include "tsl/platform/net.h" #include <sys/types.h> #include <winsock2.h> #include <cstdlib> #include <unordered_set> #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/windows/error_windows.h" #undef ERROR namespace tsl { namespace internal { namespace { bool IsPortAvailable(int* port, bool is_tcp) { const int protocol = is_tcp ? IPPROTO_TCP : 0; SOCKET sock = socket(AF_INET, is_tcp ? SOCK_STREAM : SOCK_DGRAM, protocol); struct sockaddr_in addr; int addr_len = static_cast<int>(sizeof(addr)); int actual_port; CHECK_GE(*port, 0); CHECK_LE(*port, 65535); if (sock == INVALID_SOCKET) { LOG(ERROR) << "socket() failed: " << tsl::internal::WindowsWSAGetLastErrorMessage(); return false; } const int one = 1; int result = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<const char*>(&one), sizeof(one)); if (result == SOCKET_ERROR) { LOG(ERROR) << "setsockopt() failed: " << tsl::internal::WindowsWSAGetLastErrorMessage(); closesocket(sock); return false; } addr.sin_family = AF_INET; addr.sin_addr.s_addr = INADDR_ANY; addr.sin_port = htons((uint16_t)*port); result = bind(sock, (struct sockaddr*)&addr, sizeof(addr)); if (result == SOCKET_ERROR) { LOG(WARNING) << "bind(port=" << *port << ") failed: " << tsl::internal::WindowsWSAGetLastErrorMessage(); closesocket(sock); return false; } result = getsockname(sock, (struct sockaddr*)&addr, &addr_len); if (result == SOCKET_ERROR) { LOG(WARNING) << "getsockname() failed: " << tsl::internal::WindowsWSAGetLastErrorMessage(); closesocket(sock); return false; } CHECK_LE(addr_len, sizeof(addr)); actual_port = ntohs(addr.sin_port); CHECK_GT(actual_port, 0); if (*port == 0) { *port = actual_port; } else { CHECK_EQ(*port, actual_port); } closesocket(sock); return true; } const int kNumRandomPortsToPick = 100; const int kMaximumTrials = 1000; } int PickUnusedPortOrDie() { WSADATA wsaData; if (WSAStartup(MAKEWORD(2, 2), &wsaData) != NO_ERROR) { LOG(ERROR) << "Error at WSAStartup()"; return 
false; } static std::unordered_set<int> chosen_ports; bool is_tcp = true; int trial = 0; while (true) { int port; trial++; CHECK_LE(trial, kMaximumTrials) << "Failed to pick an unused port for testing."; if (trial == 1) { port = GetCurrentProcessId() % (65536 - 30000) + 30000; } else if (trial <= kNumRandomPortsToPick) { port = rand() % (65536 - 30000) + 30000; } else { port = 0; } if (chosen_ports.find(port) != chosen_ports.end()) { continue; } if (!IsPortAvailable(&port, is_tcp)) { continue; } CHECK_GT(port, 0); if (!IsPortAvailable(&port, !is_tcp)) { is_tcp = !is_tcp; continue; } chosen_ports.insert(port); WSACleanup(); return port; } return 0; } } }
#include "tsl/platform/net.h" #include "tsl/platform/logging.h" #include "tsl/platform/test.h" namespace tsl { namespace internal { TEST(Net, PickUnusedPortOrDie) { int port0 = PickUnusedPortOrDie(); int port1 = PickUnusedPortOrDie(); CHECK_GE(port0, 0); CHECK_LT(port0, 65536); CHECK_GE(port1, 0); CHECK_LT(port1, 65536); CHECK_NE(port0, port1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/net.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/net_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
721b2893-361f-43f7-b07b-7b0aee6e1d39
cpp
tensorflow/tensorflow
rocm_rocdl_path
third_party/xla/third_party/tsl/tsl/platform/default/rocm_rocdl_path.cc
tensorflow/core/platform/rocm_rocdl_path_test.cc
#include "tsl/platform/rocm_rocdl_path.h" #include <stdlib.h> #include "tsl/platform/path.h" #if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM #include "rocm/rocm_config.h" #endif #include "tsl/platform/logging.h" namespace tsl { std::string RocmRoot() { #if TENSORFLOW_USE_ROCM if (const char* rocm_path_env = std::getenv("ROCM_PATH")) { VLOG(3) << "ROCM root = " << rocm_path_env; return rocm_path_env; } else { VLOG(3) << "ROCM root = " << TF_ROCM_TOOLKIT_PATH; return TF_ROCM_TOOLKIT_PATH; } #else return ""; #endif } std::string RocdlRoot() { if (const char* device_lib_path_env = std::getenv("HIP_DEVICE_LIB_PATH")) { return device_lib_path_env; } else { return io::JoinPath(RocmRoot(), "amdgcn/bitcode"); } } }
#include "tensorflow/core/platform/rocm_rocdl_path.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM #include "rocm/rocm_config.h" #endif namespace tensorflow { #if TENSORFLOW_USE_ROCM TEST(RocmRocdlPathTest, ROCDLPath) { VLOG(2) << "ROCm-Device-Libs root = " << RocdlRoot(); std::vector<string> rocdl_files; TF_EXPECT_OK(Env::Default()->GetMatchingPaths( io::JoinPath(RocdlRoot(), "*.bc"), &rocdl_files)); EXPECT_LT(0, rocdl_files.size()); } #endif }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/rocm_rocdl_path.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/rocm_rocdl_path_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c9708396-efe5-4491-9f3f-40517e42f7ee
cpp
tensorflow/tensorflow
port
tensorflow/core/util/port.cc
third_party/xla/third_party/tsl/tsl/platform/port_test.cc
#include "tensorflow/core/util/port.h" #include "absl/base/call_once.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/util/env_var.h" namespace tensorflow { bool IsGoogleCudaEnabled() { #if GOOGLE_CUDA return true; #else return false; #endif } bool IsBuiltWithROCm() { #if TENSORFLOW_USE_ROCM return true; #else return false; #endif } bool IsBuiltWithXLA() { #if TENSORFLOW_USE_XLA return true; #else return false; #endif } bool IsBuiltWithNvcc() { #if TENSORFLOW_USE_NVCC return true; #else return false; #endif } bool IsAArch32Available() { #if TF_LLVM_AARCH32_AVAILABLE return true; #else return false; #endif } bool IsAArch64Available() { #if TF_LLVM_AARCH64_AVAILABLE return true; #else return false; #endif } bool IsPowerPCAvailable() { #if TF_LLVM_POWERPC_AVAILABLE return true; #else return false; #endif } bool IsSystemZAvailable() { #if TF_LLVM_S390X_AVAILABLE return true; #else return false; #endif } bool IsX86Available() { #if TF_LLVM_X86_AVAILABLE return true; #else return false; #endif } bool GpuSupportsHalfMatMulAndConv() { #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) return true; #else return false; #endif } inline bool DefaultOneDnnPolicy() { #if !defined(INTEL_MKL) return false; #elif defined(PLATFORM_GOOGLE) return true; #elif defined(PLATFORM_WINDOWS) && defined(PLATFORM_IS_X86) return true; #elif defined(__linux__) return port::TestCPUFeature(port::CPUFeature::AVX512_VNNI) || port::TestCPUFeature(port::CPUFeature::AVX512_BF16) || port::TestCPUFeature(port::CPUFeature::AVX_VNNI) || port::TestCPUFeature(port::CPUFeature::AMX_TILE) || port::TestCPUFeature(port::CPUFeature::AMX_INT8) || port::TestCPUFeature(port::CPUFeature::AMX_BF16) || port::TestAarch64CPU( port::Aarch64CPU::ARM_NEOVERSE_V1); #else return false; #endif } bool IsMklEnabled() { #ifndef INTEL_MKL return false; #endif static absl::once_flag once; #ifdef ENABLE_MKL static bool oneDNN_disabled = false; 
absl::call_once(once, [&] { TF_CHECK_OK(ReadBoolFromEnvVar("TF_DISABLE_MKL", false, &oneDNN_disabled)); if (oneDNN_disabled) VLOG(2) << "TF-MKL: Disabling oneDNN"; }); return (!oneDNN_disabled); #else static bool oneDNN_enabled = DefaultOneDnnPolicy(); absl::call_once(once, [&] { auto status = ReadBoolFromEnvVar("TF_ENABLE_ONEDNN_OPTS", oneDNN_enabled, &oneDNN_enabled); if (!status.ok()) { LOG(WARNING) << "TF_ENABLE_ONEDNN_OPTS is not set to either '0', 'false'," << " '1', or 'true'. Using the default setting: " << oneDNN_enabled; } if (oneDNN_enabled) { LOG(INFO) << "oneDNN custom operations are on. " << "You may see slightly different numerical results due to " << "floating-point round-off errors from different computation " << "orders. To turn them off, set the environment variable " << "`TF_ENABLE_ONEDNN_OPTS=0`."; } }); return oneDNN_enabled; #endif } bool IsZenDnnEnabled() { #ifndef AMD_ZENDNN return false; #else static absl::once_flag once; static bool ZenDNN_enabled = false; absl::call_once(once, [&] { auto status = ReadBoolFromEnvVar("TF_ENABLE_ZENDNN_OPTS", ZenDNN_enabled, &ZenDNN_enabled); if (!status.ok()) { LOG(WARNING) << "TF_ENABLE_ZENDNN_OPTS is not set to either '0', 'false'," << " '1', or 'true'. Using the default setting: " << ZenDNN_enabled; } if (ZenDNN_enabled) { LOG(INFO) << "ZenDNN custom operations are on. " << "You may see slightly different numerical results due to " << "floating-point round-off errors from different computation " << "orders. To turn them off, set the environment variable " << "`TF_ENABLE_ZENDNN_OPTS=0`."; } }); return ZenDNN_enabled; #endif } }
#include <condition_variable> #include "tsl/platform/cpu_info.h" #include "tsl/platform/env_time.h" #include "tsl/platform/mem.h" #include "tsl/platform/mutex.h" #include "tsl/platform/test.h" #include "tsl/platform/threadpool.h" namespace tsl { namespace port { TEST(Port, AlignedMalloc) { for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) { void* p = AlignedMalloc(1, alignment); ASSERT_TRUE(p != nullptr) << "AlignedMalloc(1, " << alignment << ")"; uintptr_t pval = reinterpret_cast<uintptr_t>(p); EXPECT_EQ(pval % alignment, 0); AlignedFree(p); } } TEST(Port, GetCurrentCPU) { const int cpu = GetCurrentCPU(); #if !defined(__APPLE__) EXPECT_GE(cpu, 0); EXPECT_LT(cpu, NumTotalCPUs()); #endif } TEST(ConditionVariable, WaitForMilliseconds_Timeout) { mutex m; mutex_lock l(m); condition_variable cv; ConditionResult result = tsl::kCond_MaybeNotified; time_t start = time(nullptr); while (result == tsl::kCond_MaybeNotified) { result = WaitForMilliseconds(&l, &cv, 3000); } EXPECT_EQ(result, tsl::kCond_Timeout); time_t finish = time(nullptr); EXPECT_GE(finish - start, 3); } TEST(ConditionVariable, WaitForMilliseconds_Signalled) { thread::ThreadPool pool(Env::Default(), "test", 1); mutex m; mutex_lock l(m); condition_variable cv; time_t start = time(nullptr); pool.Schedule([&m, &cv]() { Env::Default()->SleepForMicroseconds(1 * 1000 * 1000); mutex_lock l(m); cv.notify_all(); }); EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), tsl::kCond_MaybeNotified); time_t finish = time(nullptr); EXPECT_LT(finish - start, 3); } TEST(ConditionalCriticalSections, AwaitWithDeadline_Timeout) { bool always_false = false; mutex m; m.lock(); time_t start = time(nullptr); bool result = m.AwaitWithDeadline(Condition(&always_false), EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos); time_t finish = time(nullptr); m.unlock(); EXPECT_EQ(result, false); EXPECT_GE(finish - start, 3); } TEST(ConditionalCriticalSections, AwaitWithDeadline_Woken) { thread::ThreadPool pool(Env::Default(), "test", 
1); bool woken = false; mutex m; m.lock(); time_t start = time(nullptr); pool.Schedule([&m, &woken]() { Env::Default()->SleepForMicroseconds(1 * 1000 * 1000); m.lock(); woken = true; m.unlock(); }); bool result = m.AwaitWithDeadline( Condition(&woken), EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos); time_t finish = time(nullptr); m.unlock(); EXPECT_EQ(result, true); EXPECT_LT(finish - start, 3); } static bool Invert(bool* b) { return !*b; } class InvertClass { public: explicit InvertClass(bool* value) : value_(value) {} bool Value() { return !*this->value_; } private: InvertClass(); bool* value_; }; TEST(ConditionalCriticalSections, Await_PingPong) { thread::ThreadPool pool(Env::Default(), "test", 1); bool ping_pong = false; bool done = false; mutex m; pool.Schedule([&m, &ping_pong, &done]() { m.lock(); for (int i = 0; i != 1000; i++) { m.Await(Condition(&ping_pong)); ping_pong = false; } done = true; m.unlock(); }); m.lock(); InvertClass invert(&ping_pong); for (int i = 0; i != 1000; i++) { m.Await(Condition(&Invert, &ping_pong)); ping_pong = true; } m.Await(Condition(&done)); m.unlock(); } TEST(ConditionalCriticalSections, Await_PingPongMethod) { thread::ThreadPool pool(Env::Default(), "test", 1); bool ping_pong = false; bool done = false; mutex m; pool.Schedule([&m, &ping_pong, &done]() { m.lock(); for (int i = 0; i != 1000; i++) { m.Await(Condition(&ping_pong)); ping_pong = false; } done = true; m.unlock(); }); m.lock(); InvertClass invert(&ping_pong); for (int i = 0; i != 1000; i++) { m.Await(Condition(&invert, &InvertClass::Value)); ping_pong = true; } m.Await(Condition(&done)); m.unlock(); } TEST(TestCPUFeature, TestFeature) { const bool has_avx = TestCPUFeature(CPUFeature::AVX); LOG(INFO) << "has_avx = " << has_avx; const bool has_avx2 = TestCPUFeature(CPUFeature::AVX2); LOG(INFO) << "has_avx2 = " << has_avx2; } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/port.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/port_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
66841324-c4b6-443c-b0ba-e0af50224d8b
cpp
tensorflow/tensorflow
time_util
third_party/xla/third_party/tsl/tsl/platform/cloud/time_util.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/time_util_test.cc
#include "tsl/platform/cloud/time_util.h" #include <time.h> #include <cmath> #include <cstdio> #include <ctime> #ifdef _WIN32 #define timegm _mkgmtime #endif #include "tsl/platform/errors.h" namespace tsl { namespace { constexpr int64_t kNanosecondsPerSecond = 1000 * 1000 * 1000; } absl::Status ParseRfc3339Time(const string& time, int64_t* mtime_nsec) { tm parsed{0}; float seconds; if (sscanf(time.c_str(), "%4d-%2d-%2dT%2d:%2d:%fZ", &(parsed.tm_year), &(parsed.tm_mon), &(parsed.tm_mday), &(parsed.tm_hour), &(parsed.tm_min), &seconds) != 6) { return errors::Internal( strings::StrCat("Unrecognized RFC 3339 time format: ", time)); } const int int_seconds = std::floor(seconds); parsed.tm_year -= 1900; parsed.tm_mon -= 1; parsed.tm_sec = int_seconds; *mtime_nsec = timegm(&parsed) * kNanosecondsPerSecond + static_cast<int64_t>(std::floor((seconds - int_seconds) * kNanosecondsPerSecond)); return absl::OkStatus(); } }
#include "tsl/platform/cloud/time_util.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/test.h" namespace tsl { TEST(TimeUtil, ParseRfc3339Time) { int64_t mtime_nsec; TF_EXPECT_OK(ParseRfc3339Time("2016-04-29T23:15:24.896Z", &mtime_nsec)); EXPECT_NEAR(1461971724896, mtime_nsec / 1000 / 1000, 1); } TEST(TimeUtil, ParseRfc3339Time_ParseError) { int64_t mtime_nsec; EXPECT_EQ("Unrecognized RFC 3339 time format: 2016-04-29", ParseRfc3339Time("2016-04-29", &mtime_nsec).message()); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/time_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/time_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6f7e1c3b-f30a-4c73-bbc3-a36012a5b579
cpp
tensorflow/tensorflow
gcs_dns_cache
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache_test.cc
#include "tsl/platform/cloud/gcs_dns_cache.h" #include <cstring> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "tsl/platform/errors.h" #include "tsl/platform/retrying_utils.h" #include "tsl/platform/status.h" #ifndef _WIN32 #include <arpa/inet.h> #include <netdb.h> #include <netinet/in.h> #include <sys/socket.h> #else #include <Windows.h> #include <winsock2.h> #include <ws2tcpip.h> #endif #include <sys/types.h> namespace tsl { namespace { const std::vector<string>& kCachedDomainNames = *new std::vector<string>{"www.googleapis.com", "storage.googleapis.com"}; inline void print_getaddrinfo_error(const string& name, absl::Status return_status) { LOG(ERROR) << "Error resolving " << name << ": " << return_status; } template <typename T> const T& SelectRandomItemUniform(std::default_random_engine* random, const std::vector<T>& items) { CHECK_GT(items.size(), 0); std::uniform_int_distribution<size_t> distribution(0u, items.size() - 1u); size_t choice_index = distribution(*random); return items[choice_index]; } } GcsDnsCache::GcsDnsCache(Env* env, int64_t refresh_rate_secs) : env_(env), refresh_rate_secs_(refresh_rate_secs) {} void GcsDnsCache::AnnotateRequest(HttpRequest* request) { mutex_lock l(mu_); if (!started_) { VLOG(1) << "Starting GCS DNS cache."; DCHECK(!worker_) << "Worker thread already exists!"; addresses_ = ResolveNames(kCachedDomainNames); worker_.reset(env_->StartThread({}, "gcs_dns_worker", [this]() { return WorkerThread(); })); started_ = true; } CHECK_EQ(kCachedDomainNames.size(), addresses_.size()); for (size_t i = 0; i < kCachedDomainNames.size(); ++i) { const string& name = kCachedDomainNames[i]; const std::vector<string>& addresses = addresses_[i]; if (!addresses.empty()) { const string& chosen_address = SelectRandomItemUniform(&random_, addresses); request->AddResolveOverride(name, 443, chosen_address); VLOG(1) << "Annotated DNS mapping: " << name << " --> " << chosen_address; } else { LOG(WARNING) << "No IP addresses 
available for " << name; } } } std::vector<string> GcsDnsCache::ResolveName(const string& name) { VLOG(1) << "Resolving DNS name: " << name; addrinfo hints; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; addrinfo* result = nullptr; RetryConfig retryConfig( 5000, 50 * 1000 * 5000, 5); const absl::Status getaddrinfo_status = RetryingUtils::CallWithRetries( [&name, &hints, &result]() { int return_code = getaddrinfo(name.c_str(), nullptr, &hints, &result); absl::Status return_status; switch (return_code) { case 0: return_status = absl::OkStatus(); break; #ifndef _WIN32 case EAI_ADDRFAMILY: case EAI_SERVICE: case EAI_SOCKTYPE: case EAI_NONAME: return_status = absl::FailedPreconditionError( absl::StrCat("System in invalid state for getaddrinfo call: ", gai_strerror(return_code))); break; case EAI_AGAIN: case EAI_NODATA: return_status = absl::UnavailableError(absl::StrCat( "Resolving ", name, " is temporarily unavailable")); break; case EAI_BADFLAGS: case EAI_FAMILY: return_status = absl::InvalidArgumentError(absl::StrCat( "Bad arguments for getaddrinfo: ", gai_strerror(return_code))); break; case EAI_FAIL: return_status = absl::NotFoundError( absl::StrCat("Permanent failure resolving ", name, ": ", gai_strerror(return_code))); break; case EAI_MEMORY: return_status = absl::ResourceExhaustedError("Out of memory"); break; case EAI_SYSTEM: default: return_status = absl::UnknownError(strerror(return_code)); #else case WSATYPE_NOT_FOUND: case WSAESOCKTNOSUPPORT: case WSAHOST_NOT_FOUND: return_status = absl::FailedPreconditionError( absl::StrCat("System in invalid state for getaddrinfo call: ", gai_strerror(return_code))); break; case WSATRY_AGAIN: return_status = absl::UnavailableError(absl::StrCat( "Resolving ", name, " is temporarily unavailable")); break; case WSAEINVAL: case WSAEAFNOSUPPORT: return_status = absl::InvalidArgumentError(absl::StrCat( "Bad arguments for getaddrinfo: ", gai_strerror(return_code))); break; case 
WSANO_RECOVERY: return_status = absl::NotFoundError( absl::StrCat("Permanent failure resolving ", name, ": ", gai_strerror(return_code))); break; case WSA_NOT_ENOUGH_MEMORY: return_status = absl::ResourceExhaustedError("Out of memory"); break; default: return_status = absl::UnknownError(strerror(return_code)); #endif } return absl::Status(return_status); }, retryConfig); std::vector<string> output; if (getaddrinfo_status.ok()) { for (const addrinfo* i = result; i != nullptr; i = i->ai_next) { if (i->ai_family != AF_INET || i->ai_addr->sa_family != AF_INET) { LOG(WARNING) << "Non-IPv4 address returned. ai_family: " << i->ai_family << ". sa_family: " << i->ai_addr->sa_family << "."; continue; } char buf[INET_ADDRSTRLEN]; void* address_ptr = &(reinterpret_cast<sockaddr_in*>(i->ai_addr)->sin_addr); const char* formatted = nullptr; if ((formatted = inet_ntop(i->ai_addr->sa_family, address_ptr, buf, INET_ADDRSTRLEN)) == nullptr) { LOG(ERROR) << "Error converting response to IP address for " << name << ": " << strerror(errno); } else { output.emplace_back(buf); VLOG(1) << "... address: " << buf; } } } else { print_getaddrinfo_error(name, getaddrinfo_status); } if (result != nullptr) { freeaddrinfo(result); } return output; } std::vector<std::vector<string>> GcsDnsCache::ResolveNames( const std::vector<string>& names) { std::vector<std::vector<string>> all_addresses; all_addresses.reserve(names.size()); for (const string& name : names) { all_addresses.push_back(ResolveName(name)); } return all_addresses; } void GcsDnsCache::WorkerThread() { while (true) { { mutex_lock l(mu_); if (cancelled_) return; cond_var_.wait_for(l, std::chrono::seconds(refresh_rate_secs_)); if (cancelled_) return; } auto new_addresses = ResolveNames(kCachedDomainNames); { mutex_lock l(mu_); addresses_.swap(new_addresses); } } } }
#include "tsl/platform/cloud/gcs_dns_cache.h" #include "tsl/platform/str_util.h" #include "tsl/platform/test.h" namespace tsl { class TestHttpRequest : public HttpRequest { public: void SetUri(const string& uri) override {} void SetRange(uint64 start, uint64 end) override {} void AddHeader(const string& name, const string& value) override {} void AddResolveOverride(const string& hostname, int64_t port, const string& ip_addr) override { EXPECT_EQ(port, 443) << "Unexpected port set for hostname: " << hostname; auto itr = resolve_overrides_.find(hostname); EXPECT_EQ(itr, resolve_overrides_.end()) << "Hostname " << hostname << "already in map: " << itr->second; resolve_overrides_.insert( std::map<string, string>::value_type(hostname, ip_addr)); } void AddAuthBearerHeader(const string& auth_token) override {} void SetRequestStats(HttpRequest::RequestStats* stats) override {} void SetDeleteRequest() override {} absl::Status SetPutFromFile(const string& body_filepath, size_t offset) override { return absl::OkStatus(); } void SetPutEmptyBody() override {} void SetPostFromBuffer(const char* buffer, size_t size) override {} void SetPostEmptyBody() override {} void SetResultBuffer(std::vector<char>* out_buffer) override {} void SetResultBufferDirect(char* buffer, size_t size) override {} size_t GetResultBufferDirectBytesTransferred() override { return 0; } string GetResponseHeader(const string& name) const override { return ""; } uint64 GetResponseCode() const override { return 0; } absl::Status Send() override { return absl::OkStatus(); } string EscapeString(const string& str) override { return ""; } void SetTimeouts(uint32 connection, uint32 inactivity, uint32 total) override {} std::map<string, string> resolve_overrides_; }; class GcsDnsCacheTest : public ::testing::Test { protected: void ResolveNameTest() { auto response = GcsDnsCache::ResolveName("www.googleapis.com"); EXPECT_LT(1, response.size()) << absl::StrJoin(response, ", "); } void AnnotateRequestTest() { 
GcsDnsCache d; { mutex_lock l(d.mu_); d.started_ = true; d.addresses_ = {{"192.168.1.1"}, {"172.134.1.1"}}; } TestHttpRequest req; d.AnnotateRequest(&req); EXPECT_EQ("192.168.1.1", req.resolve_overrides_["www.googleapis.com"]); EXPECT_EQ("172.134.1.1", req.resolve_overrides_["storage.googleapis.com"]); } void SuccessfulCleanupTest() { GcsDnsCache d; TestHttpRequest req; d.AnnotateRequest(&req); } }; TEST_F(GcsDnsCacheTest, AnnotateRequest) { AnnotateRequestTest(); } TEST_F(GcsDnsCacheTest, SuccessfulCleanup) { SuccessfulCleanupTest(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ea181341-9294-41a6-ba91-a770ca43600d
cpp
tensorflow/tensorflow
compute_engine_zone_provider
third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider_test.cc
#include "tsl/platform/cloud/compute_engine_zone_provider.h" #include <utility> #include "tsl/platform/str_util.h" namespace tsl { namespace { constexpr char kGceMetadataZonePath[] = "instance/zone"; } ComputeEngineZoneProvider::ComputeEngineZoneProvider( std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client) : google_metadata_client_(std::move(google_metadata_client)) {} absl::Status ComputeEngineZoneProvider::GetZone(string* zone) { if (!cached_zone.empty()) { *zone = cached_zone; return absl::OkStatus(); } std::vector<char> response_buffer; TF_RETURN_IF_ERROR(google_metadata_client_->GetMetadata(kGceMetadataZonePath, &response_buffer)); absl::string_view location(&response_buffer[0], response_buffer.size()); std::vector<string> elems = str_util::Split(location, "/"); if (elems.size() == 4) { cached_zone = elems.back(); *zone = cached_zone; } else { LOG(ERROR) << "Failed to parse the zone name from location: " << string(location); } return absl::OkStatus(); } ComputeEngineZoneProvider::~ComputeEngineZoneProvider() {} }
#include "tsl/platform/cloud/compute_engine_zone_provider.h" #include "tsl/platform/cloud/http_request_fake.h" #include "tsl/platform/test.h" namespace tsl { class ComputeEngineZoneProviderTest : public ::testing::Test { protected: void SetUp() override {} void TearDown() override {} }; TEST_F(ComputeEngineZoneProviderTest, GetZone) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: http: "Header Metadata-Flavor: Google\n", "projects/123456789/zones/us-west1-b")}); auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests); auto metadata_client = std::make_shared<ComputeEngineMetadataClient>( httpRequestFactory, RetryConfig(0 )); ComputeEngineZoneProvider provider(metadata_client); string zone; TF_EXPECT_OK(provider.GetZone(&zone)); EXPECT_EQ("us-west1-b", zone); TF_EXPECT_OK(provider.GetZone(&zone)); } TEST_F(ComputeEngineZoneProviderTest, InvalidZoneString) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: http: "Header Metadata-Flavor: Google\n", "invalidresponse")}); auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests); auto metadata_client = std::make_shared<ComputeEngineMetadataClient>( httpRequestFactory, RetryConfig(0 )); ComputeEngineZoneProvider provider(metadata_client); string zone; TF_EXPECT_OK(provider.GetZone(&zone)); EXPECT_EQ("", zone); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bd47393e-8462-41d4-90fc-269f8b471c2c
cpp
tensorflow/tensorflow
compute_engine_metadata_client
third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_metadata_client.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_metadata_client_test.cc
#include "tsl/platform/cloud/compute_engine_metadata_client.h" #include <cstdlib> #include <utility> #include "absl/strings/str_cat.h" #include "tsl/platform/cloud/curl_http_request.h" namespace tsl { namespace { constexpr char kGceMetadataHost[] = "GCE_METADATA_HOST"; constexpr char kGceMetadataBaseUrl[] = "http: } ComputeEngineMetadataClient::ComputeEngineMetadataClient( std::shared_ptr<HttpRequest::Factory> http_request_factory, const RetryConfig& config) : http_request_factory_(std::move(http_request_factory)), retry_config_(config) {} absl::Status ComputeEngineMetadataClient::GetMetadata( const string& path, std::vector<char>* response_buffer) { const auto get_metadata_from_gce = [path, response_buffer, this]() { string metadata_url; const char* metadata_url_override = std::getenv(kGceMetadataHost); if (metadata_url_override) { metadata_url = absl::StrCat("http: "/computeMetadata/v1/"); } else { metadata_url = kGceMetadataBaseUrl; } std::unique_ptr<HttpRequest> request(http_request_factory_->Create()); request->SetUri(metadata_url + path); request->AddHeader("Metadata-Flavor", "Google"); request->SetResultBuffer(response_buffer); TF_RETURN_IF_ERROR(request->Send()); return absl::OkStatus(); }; return RetryingUtils::CallWithRetries(get_metadata_from_gce, retry_config_); } }
#include "tsl/platform/cloud/compute_engine_metadata_client.h" #include "tsl/platform/cloud/http_request_fake.h" #include "tsl/platform/env.h" #include "tsl/platform/test.h" namespace tsl { class ComputeEngineMetadataClientTest : public ::testing::Test { protected: void SetUp() override { ClearEnvVars(); } void TearDown() override { ClearEnvVars(); } void ClearEnvVars() { unsetenv("GCE_METADATA_HOST"); } }; TEST_F(ComputeEngineMetadataClientTest, GetMetadata) { const string example_response = "example response"; std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", example_response)}); std::shared_ptr<HttpRequest::Factory> http_factory = std::make_shared<FakeHttpRequestFactory>(&requests); ComputeEngineMetadataClient client(http_factory, RetryConfig(0 )); std::vector<char> result; TF_EXPECT_OK( client.GetMetadata("instance/service-accounts/default/token", &result)); std::vector<char> expected(example_response.begin(), example_response.end()); EXPECT_EQ(expected, result); } TEST_F(ComputeEngineMetadataClientTest, GetCustomMetadataEndpoint) { const string example_response = "example response"; setenv("GCE_METADATA_HOST", "foo.bar", 1); std::vector<HttpRequest*> requests( {new FakeHttpRequest("Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", example_response)}); std::shared_ptr<HttpRequest::Factory> http_factory = std::make_shared<FakeHttpRequestFactory>(&requests); ComputeEngineMetadataClient client(http_factory, RetryConfig(0 )); std::vector<char> result; TF_EXPECT_OK( client.GetMetadata("instance/service-accounts/default/token", &result)); std::vector<char> expected(example_response.begin(), example_response.end()); EXPECT_EQ(expected, result); } TEST_F(ComputeEngineMetadataClientTest, RetryOnFailure) { const string example_response = "example response"; std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: http: 
"/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest( "Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", example_response)}); std::shared_ptr<HttpRequest::Factory> http_factory = std::make_shared<FakeHttpRequestFactory>(&requests); ComputeEngineMetadataClient client(http_factory, RetryConfig(0 )); std::vector<char> result; TF_EXPECT_OK( client.GetMetadata("instance/service-accounts/default/token", &result)); std::vector<char> expected(example_response.begin(), example_response.end()); EXPECT_EQ(expected, result); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_metadata_client.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_metadata_client_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7d80d504-3b5e-493a-9924-c14be6ac9cb3
cpp
tensorflow/tensorflow
gcs_file_system
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system_test.cc
#include "tsl/platform/cloud/gcs_file_system.h" #include <stdio.h> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #ifndef _WIN32 #include <unistd.h> #endif #include <algorithm> #include <cstdio> #include <cstdlib> #include <cstring> #include <fstream> #include <functional> #include <string> #include <utility> #include <vector> #include "tsl/platform/file_statistics.h" #include "tsl/platform/strcat.h" #ifdef _WIN32 #include <io.h> #endif #include "absl/base/macros.h" #include "json/json.h" #include "tsl/platform/cloud/curl_http_request.h" #include "tsl/platform/cloud/file_block_cache.h" #include "tsl/platform/cloud/google_auth_provider.h" #include "tsl/platform/cloud/ram_file_block_cache.h" #include "tsl/platform/cloud/time_util.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/mutex.h" #include "tsl/platform/numbers.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/retrying_utils.h" #include "tsl/platform/str_util.h" #include "tsl/platform/stringprintf.h" #include "tsl/platform/thread_annotations.h" #include "tsl/profiler/lib/traceme.h" #ifdef _WIN32 #ifdef DeleteFile #undef DeleteFile #endif #endif namespace tsl { namespace { constexpr char kGcsUriBase[] = "https: constexpr char kGcsUploadUriBase[] = "https: constexpr char kStorageHost[] = "storage.googleapis.com"; constexpr char kBucketMetadataLocationKey[] = "location"; constexpr size_t kReadAppendableFileBufferSize = 1024 * 1024; constexpr int kGetChildrenDefaultPageSize = 1000; constexpr uint64 HTTP_CODE_RESUME_INCOMPLETE = 308; constexpr uint64 HTTP_CODE_PRECONDITION_FAILED = 412; ABSL_DEPRECATED("Use GCS_READ_CACHE_BLOCK_SIZE_MB instead.") constexpr char kReadaheadBufferSize[] = "GCS_READAHEAD_BUFFER_SIZE_BYTES"; constexpr char kStatCacheMaxAge[] = "GCS_STAT_CACHE_MAX_AGE"; constexpr uint64 kStatCacheDefaultMaxAge = 5; constexpr char kStatCacheMaxEntries[] = "GCS_STAT_CACHE_MAX_ENTRIES"; constexpr size_t 
kStatCacheDefaultMaxEntries = 1024; constexpr char kMatchingPathsCacheMaxAge[] = "GCS_MATCHING_PATHS_CACHE_MAX_AGE"; constexpr uint64 kMatchingPathsCacheDefaultMaxAge = 0; constexpr char kMatchingPathsCacheMaxEntries[] = "GCS_MATCHING_PATHS_CACHE_MAX_ENTRIES"; constexpr size_t kMatchingPathsCacheDefaultMaxEntries = 1024; constexpr size_t kBucketLocationCacheMaxEntries = 10; constexpr size_t kCacheNeverExpire = std::numeric_limits<uint64>::max(); const FileStatistics DIRECTORY_STAT(0, 0, true); constexpr char kResolveCacheSecs[] = "GCS_RESOLVE_REFRESH_SECS"; constexpr char kRequestConnectionTimeout[] = "GCS_REQUEST_CONNECTION_TIMEOUT_SECS"; constexpr char kRequestIdleTimeout[] = "GCS_REQUEST_IDLE_TIMEOUT_SECS"; constexpr char kMetadataRequestTimeout[] = "GCS_METADATA_REQUEST_TIMEOUT_SECS"; constexpr char kReadRequestTimeout[] = "GCS_READ_REQUEST_TIMEOUT_SECS"; constexpr char kWriteRequestTimeout[] = "GCS_WRITE_REQUEST_TIMEOUT_SECS"; constexpr char kAdditionalRequestHeader[] = "GCS_ADDITIONAL_REQUEST_HEADER"; constexpr char kThrottleRate[] = "GCS_THROTTLE_TOKEN_RATE"; constexpr char kThrottleBucket[] = "GCS_THROTTLE_BUCKET_SIZE"; constexpr char kTokensPerRequest[] = "GCS_TOKENS_PER_REQUEST"; constexpr char kInitialTokens[] = "GCS_INITIAL_TOKENS"; constexpr char kRetryConfigInitialDelayTimeUs[] = "GCS_RETRY_CONFIG_INIT_DELAY_TIME_US"; constexpr char kRetryConfigMaxDelayTimeUs[] = "GCS_RETRY_CONFIG_MAX_DELAY_TIME_US"; constexpr char kRetryConfigMaxRetries[] = "GCS_RETRY_CONFIG_MAX_RETRIES"; constexpr char kAllowedBucketLocations[] = "GCS_ALLOWED_BUCKET_LOCATIONS"; constexpr char kDetectZoneSentinelValue[] = "auto"; constexpr char kAppendMode[] = "GCS_APPEND_MODE"; constexpr char kComposeAppend[] = "compose"; absl::Status GetTmpFilename(string* filename) { *filename = io::GetTempFilename(""); return absl::OkStatus(); } string MaybeAppendSlash(const string& name) { if (name.empty()) { return "/"; } if (name.back() != '/') { return strings::StrCat(name, "/"); } return 
name; } string JoinGcsPath(const string& path, const string& subpath) { return strings::StrCat(MaybeAppendSlash(path), subpath); } std::set<string> AddAllSubpaths(const std::vector<string>& paths) { std::set<string> result; result.insert(paths.begin(), paths.end()); for (const string& path : paths) { absl::string_view subpath = io::Dirname(path); while (!(subpath.empty() || subpath == "/")) { result.emplace(string(subpath)); subpath = io::Dirname(subpath); } } return result; } absl::Status ParseJson(absl::string_view json, Json::Value* result) { Json::Reader reader; if (!reader.parse(json.data(), json.data() + json.size(), *result)) { return errors::Internal("Couldn't parse JSON response from GCS."); } return absl::OkStatus(); } absl::Status ParseJson(const std::vector<char>& json, Json::Value* result) { return ParseJson(absl::string_view{json.data(), json.size()}, result); } absl::Status GetValue(const Json::Value& parent, const char* name, Json::Value* result) { *result = parent.get(name, Json::Value::null); if (result->isNull()) { return errors::Internal("The field '", name, "' was expected in the JSON response."); } return absl::OkStatus(); } absl::Status GetStringValue(const Json::Value& parent, const char* name, string* result) { Json::Value result_value; TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value)); if (!result_value.isString()) { return errors::Internal( "The field '", name, "' in the JSON response was expected to be a string."); } *result = result_value.asString(); return absl::OkStatus(); } absl::Status GetInt64Value(const Json::Value& parent, const char* name, int64_t* result) { Json::Value result_value; TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value)); if (result_value.isNumeric()) { *result = result_value.asInt64(); return absl::OkStatus(); } if (result_value.isString() && strings::safe_strto64(result_value.asCString(), result)) { return absl::OkStatus(); } return errors::Internal( "The field '", name, "' in the JSON response was 
expected to be a number."); } absl::Status GetBoolValue(const Json::Value& parent, const char* name, bool* result) { Json::Value result_value; TF_RETURN_IF_ERROR(GetValue(parent, name, &result_value)); if (!result_value.isBool()) { return errors::Internal( "The field '", name, "' in the JSON response was expected to be a boolean."); } *result = result_value.asBool(); return absl::OkStatus(); } RetryConfig GetGcsRetryConfig() { RetryConfig retryConfig( 1000 * 1000, 32 * 1000 * 1000, 10); uint64 init_delay_time_us; if (GetEnvVar(kRetryConfigInitialDelayTimeUs, strings::safe_strtou64, &init_delay_time_us)) { retryConfig.init_delay_time_us = init_delay_time_us; } uint64 max_delay_time_us; if (GetEnvVar(kRetryConfigMaxDelayTimeUs, strings::safe_strtou64, &max_delay_time_us)) { retryConfig.max_delay_time_us = max_delay_time_us; } uint32 max_retries; if (GetEnvVar(kRetryConfigMaxRetries, strings::safe_strtou32, &max_retries)) { retryConfig.max_retries = max_retries; } VLOG(1) << "GCS RetryConfig: " << "init_delay_time_us = " << retryConfig.init_delay_time_us << " ; " << "max_delay_time_us = " << retryConfig.max_delay_time_us << " ; " << "max_retries = " << retryConfig.max_retries; return retryConfig; } class GcsRandomAccessFile : public RandomAccessFile { public: using ReadFn = std::function<absl::Status( const string& filename, uint64 offset, size_t n, absl::string_view* result, char* scratch)>; GcsRandomAccessFile(const string& filename, ReadFn read_fn) : filename_(filename), read_fn_(std::move(read_fn)) {} absl::Status Name(absl::string_view* result) const override { *result = filename_; return absl::OkStatus(); } absl::Status Read(uint64 offset, size_t n, absl::string_view* result, char* scratch) const override { return read_fn_(filename_, offset, n, result, scratch); } private: const string filename_; const ReadFn read_fn_; }; class BufferedGcsRandomAccessFile : public RandomAccessFile { public: using ReadFn = std::function<absl::Status( const string& filename, 
uint64 offset, size_t n, absl::string_view* result, char* scratch)>; BufferedGcsRandomAccessFile(const string& filename, uint64 buffer_size, ReadFn read_fn) : filename_(filename), read_fn_(std::move(read_fn)), buffer_size_(buffer_size), buffer_start_(0), buffer_end_is_past_eof_(false) {} absl::Status Name(absl::string_view* result) const override { *result = filename_; return absl::OkStatus(); } absl::Status Read(uint64 offset, size_t n, absl::string_view* result, char* scratch) const override { if (n > buffer_size_) { return read_fn_(filename_, offset, n, result, scratch); } { mutex_lock l(buffer_mutex_); size_t buffer_end = buffer_start_ + buffer_.size(); size_t copy_size = 0; if (offset < buffer_end && offset >= buffer_start_) { copy_size = std::min(n, static_cast<size_t>(buffer_end - offset)); memcpy(scratch, buffer_.data() + (offset - buffer_start_), copy_size); *result = absl::string_view(scratch, copy_size); } bool consumed_buffer_to_eof = offset + copy_size >= buffer_end && buffer_end_is_past_eof_; if (copy_size < n && !consumed_buffer_to_eof) { absl::Status status = FillBuffer(offset + copy_size); if (!status.ok() && !absl::IsOutOfRange(status)) { buffer_.resize(0); return status; } size_t remaining_copy = std::min(n - copy_size, buffer_.size()); memcpy(scratch + copy_size, buffer_.data(), remaining_copy); copy_size += remaining_copy; *result = absl::string_view(scratch, copy_size); } if (copy_size < n) { buffer_end_is_past_eof_ = false; return errors::OutOfRange("EOF reached. 
Requested to read ", n, " bytes from ", offset, "."); } } return absl::OkStatus(); } private: absl::Status FillBuffer(uint64 start) const TF_EXCLUSIVE_LOCKS_REQUIRED(buffer_mutex_) { buffer_start_ = start; buffer_.resize(buffer_size_); absl::string_view str_piece; absl::Status status = read_fn_(filename_, buffer_start_, buffer_size_, &str_piece, &(buffer_[0])); buffer_end_is_past_eof_ = absl::IsOutOfRange(status); buffer_.resize(str_piece.size()); return status; } const string filename_; const ReadFn read_fn_; const uint64 buffer_size_; mutable mutex buffer_mutex_; mutable uint64 buffer_start_ TF_GUARDED_BY(buffer_mutex_); mutable bool buffer_end_is_past_eof_ TF_GUARDED_BY(buffer_mutex_); mutable string buffer_ TF_GUARDED_BY(buffer_mutex_); }; typedef std::function<absl::Status( uint64 start_offset, const std::string& object_to_upload, const std::string& bucket, uint64 file_size, const std::string& gcs_path, UploadSessionHandle* session_handle)> SessionCreator; typedef std::function<absl::Status( const std::string& session_uri, uint64 start_offset, uint64 already_uploaded, const std::string& tmp_content_filename, uint64 file_size, const std::string& file_path)> ObjectUploader; typedef std::function<absl::Status(const string& session_uri, uint64 file_size, const std::string& gcs_path, bool* completed, uint64* uploaded)> StatusPoller; typedef std::function<absl::Status(const string& fname, const string& bucket, const string& object, int64_t* generation)> GenerationGetter; class GcsWritableFile : public WritableFile { public: GcsWritableFile(const string& bucket, const string& object, GcsFileSystem* filesystem, GcsFileSystem::TimeoutConfig* timeouts, std::function<void()> file_cache_erase, RetryConfig retry_config, bool compose_append, SessionCreator session_creator, ObjectUploader object_uploader, StatusPoller status_poller, GenerationGetter generation_getter) : bucket_(bucket), object_(object), filesystem_(filesystem), timeouts_(timeouts), 
file_cache_erase_(std::move(file_cache_erase)), sync_needed_(true), retry_config_(retry_config), compose_append_(compose_append), start_offset_(0), session_creator_(std::move(session_creator)), object_uploader_(std::move(object_uploader)), status_poller_(std::move(status_poller)), generation_getter_(std::move(generation_getter)) { VLOG(3) << "GcsWritableFile: " << GetGcsPath(); if (GetTmpFilename(&tmp_content_filename_).ok()) { outfile_.open(tmp_content_filename_, std::ofstream::binary | std::ofstream::app); } } GcsWritableFile(const string& bucket, const string& object, GcsFileSystem* filesystem, const string& tmp_content_filename, GcsFileSystem::TimeoutConfig* timeouts, std::function<void()> file_cache_erase, RetryConfig retry_config, bool compose_append, SessionCreator session_creator, ObjectUploader object_uploader, StatusPoller status_poller, GenerationGetter generation_getter) : bucket_(bucket), object_(object), filesystem_(filesystem), timeouts_(timeouts), file_cache_erase_(std::move(file_cache_erase)), sync_needed_(true), retry_config_(retry_config), compose_append_(compose_append), start_offset_(0), session_creator_(std::move(session_creator)), object_uploader_(std::move(object_uploader)), status_poller_(std::move(status_poller)), generation_getter_(std::move(generation_getter)) { VLOG(3) << "GcsWritableFile: " << GetGcsPath() << "with existing file " << tmp_content_filename; tmp_content_filename_ = tmp_content_filename; outfile_.open(tmp_content_filename_, std::ofstream::binary | std::ofstream::app); } ~GcsWritableFile() override { Close().IgnoreError(); std::remove(tmp_content_filename_.c_str()); } absl::Status Append(absl::string_view data) override { TF_RETURN_IF_ERROR(CheckWritable()); VLOG(3) << "Append: " << GetGcsPath() << " size " << data.length(); sync_needed_ = true; outfile_ << data; if (!outfile_.good()) { return errors::Internal( "Could not append to the internal temporary file."); } return absl::OkStatus(); } absl::Status Close() override { 
VLOG(3) << "Close:" << GetGcsPath(); if (outfile_.is_open()) { absl::Status sync_status = Sync(); if (sync_status.ok()) { outfile_.close(); } return sync_status; } return absl::OkStatus(); } absl::Status Flush() override { VLOG(3) << "Flush:" << GetGcsPath(); return Sync(); } absl::Status Name(absl::string_view* result) const override { *result = object_; return absl::OkStatus(); } absl::Status Sync() override { VLOG(3) << "Sync started:" << GetGcsPath(); TF_RETURN_IF_ERROR(CheckWritable()); if (!sync_needed_) { return absl::OkStatus(); } absl::Status status = SyncImpl(); VLOG(3) << "Sync finished " << GetGcsPath(); if (status.ok()) { sync_needed_ = false; } return status; } absl::Status Tell(int64_t* position) override { *position = outfile_.tellp(); if (*position == -1) { return errors::Internal("tellp on the internal temporary file failed"); } return absl::OkStatus(); } private: absl::Status SyncImpl() { outfile_.flush(); if (!outfile_.good()) { return errors::Internal( "Could not write to the internal temporary file."); } UploadSessionHandle session_handle; uint64 start_offset = 0; string object_to_upload = object_; bool should_compose = false; if (compose_append_) { start_offset = start_offset_; should_compose = start_offset > 0; if (should_compose) { object_to_upload = strings::StrCat(io::Dirname(object_), "/.tmpcompose/", io::Basename(object_), ".", start_offset_); } } TF_RETURN_IF_ERROR(CreateNewUploadSession(start_offset, object_to_upload, &session_handle)); uint64 already_uploaded = 0; bool first_attempt = true; const absl::Status upload_status = RetryingUtils::CallWithRetries( [&first_attempt, &already_uploaded, &session_handle, &start_offset, this]() { if (session_handle.resumable && !first_attempt) { bool completed; TF_RETURN_IF_ERROR(RequestUploadSessionStatus( session_handle.session_uri, &completed, &already_uploaded)); LOG(INFO) << "### RequestUploadSessionStatus: completed = " << completed << ", already_uploaded = " << already_uploaded << ", file = 
" << GetGcsPath(); if (completed) { file_cache_erase_(); return absl::OkStatus(); } } first_attempt = false; return UploadToSession(session_handle.session_uri, start_offset, already_uploaded); }, retry_config_); if (absl::IsNotFound(upload_status)) { return errors::Unavailable( strings::StrCat("Upload to gs: " failed, caused by: ", upload_status.message())); } if (upload_status.ok()) { if (should_compose) { TF_RETURN_IF_ERROR(AppendObject(object_to_upload)); } TF_RETURN_IF_ERROR(GetCurrentFileSize(&start_offset_)); } return upload_status; } absl::Status CheckWritable() const { if (!outfile_.is_open()) { return errors::FailedPrecondition( "The internal temporary file is not writable."); } return absl::OkStatus(); } absl::Status GetCurrentFileSize(uint64* size) { const auto tellp = outfile_.tellp(); if (tellp == static_cast<std::streampos>(-1)) { return errors::Internal( "Could not get the size of the internal temporary file."); } *size = tellp; return absl::OkStatus(); } absl::Status CreateNewUploadSession(uint64 start_offset, std::string object_to_upload, UploadSessionHandle* session_handle) { uint64 file_size; TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size)); return session_creator_(start_offset, object_to_upload, bucket_, file_size, GetGcsPath(), session_handle); } absl::Status AppendObject(string append_object) { const string append_object_path = GetGcsPathWithObject(append_object); VLOG(3) << "AppendObject: " << append_object_path << " to " << GetGcsPath(); int64_t generation = 0; TF_RETURN_IF_ERROR( generation_getter_(GetGcsPath(), bucket_, object_, &generation)); TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( [&append_object, &generation, this]() { std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(filesystem_->CreateHttpRequest(&request)); request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket_, "/o/", request->EscapeString(object_), "/compose")); const string request_body = strings::StrCat( "{'sourceObjects': [{'name': '", object_, 
"','objectPrecondition':{'ifGenerationMatch':", generation, "}},{'name': '", append_object, "'}]}"); request->SetTimeouts(timeouts_->connect, timeouts_->idle, timeouts_->metadata); request->AddHeader("content-type", "application/json"); request->SetPostFromBuffer(request_body.c_str(), request_body.size()); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when composing to ", GetGcsPath()); return absl::OkStatus(); }, retry_config_)); return RetryingUtils::DeleteWithRetries( [&append_object_path, this]() { return filesystem_->DeleteFile(append_object_path, nullptr); }, retry_config_); } absl::Status RequestUploadSessionStatus(const string& session_uri, bool* completed, uint64* uploaded) { uint64 file_size; TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size)); return status_poller_(session_uri, file_size, GetGcsPath(), completed, uploaded); } absl::Status UploadToSession(const string& session_uri, uint64 start_offset, uint64 already_uploaded) { uint64 file_size; TF_RETURN_IF_ERROR(GetCurrentFileSize(&file_size)); absl::Status status = object_uploader_(session_uri, start_offset, already_uploaded, tmp_content_filename_, file_size, GetGcsPath()); if (status.ok()) { file_cache_erase_(); } return status; } string GetGcsPathWithObject(string object) const { return strings::StrCat("gs: } string GetGcsPath() const { return GetGcsPathWithObject(object_); } string bucket_; string object_; GcsFileSystem* const filesystem_; string tmp_content_filename_; std::ofstream outfile_; GcsFileSystem::TimeoutConfig* timeouts_; std::function<void()> file_cache_erase_; bool sync_needed_; RetryConfig retry_config_ = GetGcsRetryConfig(); bool compose_append_; uint64 start_offset_; const SessionCreator session_creator_; const ObjectUploader object_uploader_; const StatusPoller status_poller_; const GenerationGetter generation_getter_; }; class GcsReadOnlyMemoryRegion : public ReadOnlyMemoryRegion { public: GcsReadOnlyMemoryRegion(std::unique_ptr<char[]> data, uint64 length) : 
data_(std::move(data)), length_(length) {} const void* data() override { return reinterpret_cast<void*>(data_.get()); } uint64 length() override { return length_; } private: std::unique_ptr<char[]> data_; uint64 length_; }; bool StringPieceIdentity(absl::string_view str, absl::string_view* value) { *value = str; return true; } bool SplitByCommaToLowercaseSet(absl::string_view list, std::unordered_set<string>* set) { std::vector<string> vector = absl::StrSplit(absl::AsciiStrToLower(list), ','); *set = std::unordered_set<string>(vector.begin(), vector.end()); return true; } string ZoneToRegion(string* zone) { return zone->substr(0, zone->find_last_of('-')); } } GcsFileSystem::GcsFileSystem(bool make_default_cache) { uint64 value; block_size_ = kDefaultBlockSize; size_t max_bytes = kDefaultMaxCacheSize; uint64 max_staleness = kDefaultMaxStaleness; http_request_factory_ = std::make_shared<CurlHttpRequest::Factory>(); compute_engine_metadata_client_ = std::make_shared<ComputeEngineMetadataClient>(http_request_factory_); auth_provider_ = std::unique_ptr<AuthProvider>( new GoogleAuthProvider(compute_engine_metadata_client_)); zone_provider_ = std::unique_ptr<ZoneProvider>( new ComputeEngineZoneProvider(compute_engine_metadata_client_)); if (GetEnvVar(kReadaheadBufferSize, strings::safe_strtou64, &value)) { block_size_ = value; } if (GetEnvVar(kBlockSize, strings::safe_strtou64, &value)) { block_size_ = value * 1024 * 1024; } if (GetEnvVar(kMaxCacheSize, strings::safe_strtou64, &value)) { max_bytes = value * 1024 * 1024; } if (GetEnvVar(kMaxStaleness, strings::safe_strtou64, &value)) { max_staleness = value; } if (!make_default_cache) { max_bytes = 0; } VLOG(1) << "GCS cache max size = " << max_bytes << " ; " << "block size = " << block_size_ << " ; " << "max staleness = " << max_staleness; file_block_cache_ = MakeFileBlockCache(block_size_, max_bytes, max_staleness); uint64 stat_cache_max_age = kStatCacheDefaultMaxAge; size_t stat_cache_max_entries = 
kStatCacheDefaultMaxEntries; if (GetEnvVar(kStatCacheMaxAge, strings::safe_strtou64, &value)) { stat_cache_max_age = value; } if (GetEnvVar(kStatCacheMaxEntries, strings::safe_strtou64, &value)) { stat_cache_max_entries = value; } stat_cache_.reset(new ExpiringLRUCache<GcsFileStat>(stat_cache_max_age, stat_cache_max_entries)); uint64 matching_paths_cache_max_age = kMatchingPathsCacheDefaultMaxAge; size_t matching_paths_cache_max_entries = kMatchingPathsCacheDefaultMaxEntries; if (GetEnvVar(kMatchingPathsCacheMaxAge, strings::safe_strtou64, &value)) { matching_paths_cache_max_age = value; } if (GetEnvVar(kMatchingPathsCacheMaxEntries, strings::safe_strtou64, &value)) { matching_paths_cache_max_entries = value; } matching_paths_cache_.reset(new ExpiringLRUCache<std::vector<string>>( matching_paths_cache_max_age, matching_paths_cache_max_entries)); bucket_location_cache_.reset(new ExpiringLRUCache<string>( kCacheNeverExpire, kBucketLocationCacheMaxEntries)); int64_t resolve_frequency_secs; if (GetEnvVar(kResolveCacheSecs, strings::safe_strto64, &resolve_frequency_secs)) { dns_cache_.reset(new GcsDnsCache(resolve_frequency_secs)); VLOG(1) << "GCS DNS cache is enabled. " << kResolveCacheSecs << " = " << resolve_frequency_secs; } else { VLOG(1) << "GCS DNS cache is disabled, because " << kResolveCacheSecs << " = 0 (or is not set)"; } absl::string_view add_header_contents; if (GetEnvVar(kAdditionalRequestHeader, StringPieceIdentity, &add_header_contents)) { size_t split = add_header_contents.find(':', 0); if (split != absl::string_view::npos) { absl::string_view header_name = add_header_contents.substr(0, split); absl::string_view header_value = add_header_contents.substr(split + 1); if (!header_name.empty() && !header_value.empty()) { additional_header_.reset(new std::pair<const string, const string>( string(header_name), string(header_value))); VLOG(1) << "GCS additional header ENABLED. 
" << "Name: " << additional_header_->first << ", " << "Value: " << additional_header_->second; } else { LOG(ERROR) << "GCS additional header DISABLED. Invalid contents: " << add_header_contents; } } else { LOG(ERROR) << "GCS additional header DISABLED. Invalid contents: " << add_header_contents; } } else { VLOG(1) << "GCS additional header DISABLED. No environment variable set."; } uint32 timeout_value; if (GetEnvVar(kRequestConnectionTimeout, strings::safe_strtou32, &timeout_value)) { timeouts_.connect = timeout_value; } if (GetEnvVar(kRequestIdleTimeout, strings::safe_strtou32, &timeout_value)) { timeouts_.idle = timeout_value; } if (GetEnvVar(kMetadataRequestTimeout, strings::safe_strtou32, &timeout_value)) { timeouts_.metadata = timeout_value; } if (GetEnvVar(kReadRequestTimeout, strings::safe_strtou32, &timeout_value)) { timeouts_.read = timeout_value; } if (GetEnvVar(kWriteRequestTimeout, strings::safe_strtou32, &timeout_value)) { timeouts_.write = timeout_value; } int64_t token_value; if (GetEnvVar(kThrottleRate, strings::safe_strto64, &token_value)) { GcsThrottleConfig config; config.enabled = true; config.token_rate = token_value; if (GetEnvVar(kThrottleBucket, strings::safe_strto64, &token_value)) { config.bucket_size = token_value; } if (GetEnvVar(kTokensPerRequest, strings::safe_strto64, &token_value)) { config.tokens_per_request = token_value; } if (GetEnvVar(kInitialTokens, strings::safe_strto64, &token_value)) { config.initial_tokens = token_value; } throttle_.SetConfig(config); } GetEnvVar(kAllowedBucketLocations, SplitByCommaToLowercaseSet, &allowed_locations_); absl::string_view append_mode; GetEnvVar(kAppendMode, StringPieceIdentity, &append_mode); if (append_mode == kComposeAppend) { compose_append_ = true; } else { compose_append_ = false; } retry_config_ = GetGcsRetryConfig(); } GcsFileSystem::GcsFileSystem( std::unique_ptr<AuthProvider> auth_provider, std::unique_ptr<HttpRequest::Factory> http_request_factory, std::unique_ptr<ZoneProvider> 
zone_provider, size_t block_size, size_t max_bytes, uint64 max_staleness, uint64 stat_cache_max_age, size_t stat_cache_max_entries, uint64 matching_paths_cache_max_age, size_t matching_paths_cache_max_entries, RetryConfig retry_config, TimeoutConfig timeouts, const std::unordered_set<string>& allowed_locations, std::pair<const string, const string>* additional_header, bool compose_append) : timeouts_(timeouts), retry_config_(retry_config), auth_provider_(std::move(auth_provider)), http_request_factory_(std::move(http_request_factory)), zone_provider_(std::move(zone_provider)), block_size_(block_size), file_block_cache_( MakeFileBlockCache(block_size, max_bytes, max_staleness)), stat_cache_(new StatCache(stat_cache_max_age, stat_cache_max_entries)), matching_paths_cache_(new MatchingPathsCache( matching_paths_cache_max_age, matching_paths_cache_max_entries)), bucket_location_cache_(new BucketLocationCache( kCacheNeverExpire, kBucketLocationCacheMaxEntries)), allowed_locations_(allowed_locations), compose_append_(compose_append), additional_header_(additional_header) {} absl::Status GcsFileSystem::NewRandomAccessFile( const string& fname, TransactionToken* token, std::unique_ptr<RandomAccessFile>* result) { string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); TF_RETURN_IF_ERROR(CheckBucketLocationConstraint(bucket)); if (cache_enabled_) { result->reset(new GcsRandomAccessFile(fname, [this, bucket, object]( const string& fname, uint64 offset, size_t n, absl::string_view* result, char* scratch) { tf_shared_lock l(block_cache_lock_); GcsFileStat stat; TF_RETURN_IF_ERROR(stat_cache_->LookupOrCompute( fname, &stat, [this, bucket, object](const string& fname, GcsFileStat* stat) { return UncachedStatForObject(fname, bucket, object, stat); })); if (!file_block_cache_->ValidateAndUpdateFileSignature( fname, stat.generation_number)) { VLOG(1) << "File signature has been changed. Refreshing the cache. 
Path: " << fname; } *result = absl::string_view(); size_t bytes_transferred; TF_RETURN_IF_ERROR(file_block_cache_->Read(fname, offset, n, scratch, &bytes_transferred)); *result = absl::string_view(scratch, bytes_transferred); if (bytes_transferred < n) { return errors::OutOfRange("EOF reached, ", result->size(), " bytes were read out of ", n, " bytes requested."); } return absl::OkStatus(); })); } else { result->reset(new BufferedGcsRandomAccessFile( fname, block_size_, [this, bucket, object](const string& fname, uint64 offset, size_t n, absl::string_view* result, char* scratch) { *result = absl::string_view(); size_t bytes_transferred; TF_RETURN_IF_ERROR( LoadBufferFromGCS(fname, offset, n, scratch, &bytes_transferred)); *result = absl::string_view(scratch, bytes_transferred); if (bytes_transferred < n) { return errors::OutOfRange("EOF reached, ", result->size(), " bytes were read out of ", n, " bytes requested."); } return absl::OkStatus(); })); } return absl::OkStatus(); } void GcsFileSystem::ResetFileBlockCache(size_t block_size_bytes, size_t max_bytes, uint64 max_staleness_secs) { mutex_lock l(block_cache_lock_); file_block_cache_ = MakeFileBlockCache(block_size_bytes, max_bytes, max_staleness_secs); if (stats_ != nullptr) { stats_->Configure(this, &throttle_, file_block_cache_.get()); } } std::unique_ptr<FileBlockCache> GcsFileSystem::MakeFileBlockCache( size_t block_size, size_t max_bytes, uint64 max_staleness) { std::unique_ptr<FileBlockCache> file_block_cache(new RamFileBlockCache( block_size, max_bytes, max_staleness, [this](const string& filename, size_t offset, size_t n, char* buffer, size_t* bytes_transferred) { return LoadBufferFromGCS(filename, offset, n, buffer, bytes_transferred); })); cache_enabled_ = file_block_cache->IsCacheEnabled(); return file_block_cache; } absl::Status GcsFileSystem::LoadBufferFromGCS(const string& fname, size_t offset, size_t n, char* buffer, size_t* bytes_transferred) { *bytes_transferred = 0; string bucket, object; 
TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); profiler::TraceMe activity( [fname]() { return absl::StrCat("LoadBufferFromGCS ", fname); }); std::unique_ptr<HttpRequest> request; TF_RETURN_WITH_CONTEXT_IF_ERROR(CreateHttpRequest(&request), "when reading gs: request->SetUri(strings::StrCat("https: request->EscapeString(object))); request->SetRange(offset, offset + n - 1); request->SetResultBufferDirect(buffer, n); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.read); if (stats_ != nullptr) { stats_->RecordBlockLoadRequest(fname, offset); } TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading gs: bucket, "/", object); size_t bytes_read = request->GetResultBufferDirectBytesTransferred(); *bytes_transferred = bytes_read; VLOG(1) << "Successful read of gs: << offset << " of size: " << bytes_read; activity.AppendMetadata([bytes_read]() { return profiler::TraceMeEncode({{"block_size", bytes_read}}); }); if (stats_ != nullptr) { stats_->RecordBlockRetrieved(fname, offset, bytes_read); } throttle_.RecordResponse(bytes_read); if (bytes_read < n) { GcsFileStat stat; if (stat_cache_->Lookup(fname, &stat)) { if (offset + bytes_read < stat.base.length) { return errors::Internal(strings::Printf( "File contents are inconsistent for file: %s @ %lu.", fname.c_str(), offset)); } VLOG(2) << "Successful integrity check for: gs: << object << " @ " << offset; } } return absl::OkStatus(); } absl::Status GcsFileSystem::CreateNewUploadSession( uint64 start_offset, const std::string& object_to_upload, const std::string& bucket, uint64 file_size, const std::string& gcs_path, UploadSessionHandle* session_handle) { std::vector<char> output_buffer; std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); std::string uri = strings::StrCat( kGcsUploadUriBase, "b/", bucket, "/o?uploadType=resumable&name=", request->EscapeString(object_to_upload)); request->SetUri(uri); request->AddHeader("X-Upload-Content-Length", 
absl::StrCat(file_size - start_offset)); request->SetPostEmptyBody(); request->SetResultBuffer(&output_buffer); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when initiating an upload to ", gcs_path); if (session_handle != nullptr) { session_handle->resumable = true; session_handle->session_uri = request->GetResponseHeader("Location"); if (session_handle->session_uri.empty()) { return errors::Internal("Unexpected response from GCS when writing to ", gcs_path, ": 'Location' header not returned."); } } return absl::OkStatus(); } absl::Status GcsFileSystem::UploadToSession( const std::string& session_uri, uint64 start_offset, uint64 already_uploaded, const std::string& tmp_content_filename, uint64 file_size, const std::string& file_path) { std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(session_uri); if (file_size > 0) { request->AddHeader("Content-Range", strings::StrCat("bytes ", already_uploaded, "-", file_size - start_offset - 1, "/", file_size - start_offset)); } request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.write); TF_RETURN_IF_ERROR(request->SetPutFromFile(tmp_content_filename, start_offset + already_uploaded)); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when uploading ", file_path); return absl::OkStatus(); } absl::Status GcsFileSystem::RequestUploadSessionStatus( const string& session_uri, uint64 file_size, const std::string& gcs_path, bool* completed, uint64* uploaded) { CHECK(completed != nullptr) << "RequestUploadSessionStatus() called with out " "param 'completed' == nullptr."; CHECK(uploaded != nullptr) << "RequestUploadSessionStatus() called with out " "param 'uploaded' == nullptr."; std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(session_uri); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); 
request->AddHeader("Content-Range", strings::StrCat("bytes */", file_size)); request->SetPutEmptyBody(); absl::Status status = request->Send(); if (status.ok()) { *completed = true; return absl::OkStatus(); } *completed = false; if (request->GetResponseCode() != HTTP_CODE_RESUME_INCOMPLETE) { TF_RETURN_WITH_CONTEXT_IF_ERROR(status, " when resuming upload ", gcs_path); } const std::string received_range = request->GetResponseHeader("Range"); if (received_range.empty()) { *uploaded = 0; } else { absl::string_view range_piece(received_range); absl::ConsumePrefix(&range_piece, "bytes="); auto return_error = [](const std::string& gcs_path, const std::string& error_message) { return errors::Internal("Unexpected response from GCS when writing ", gcs_path, ": ", error_message); }; std::vector<string> range_strs = str_util::Split(range_piece, '-'); if (range_strs.size() != 2) { return return_error(gcs_path, "Range header '" + received_range + "' could not be parsed."); } std::vector<int64_t> range_parts; for (const std::string& range_str : range_strs) { int64_t tmp; if (strings::safe_strto64(range_str, &tmp)) { range_parts.push_back(tmp); } else { return return_error(gcs_path, "Range header '" + received_range + "' could not be parsed."); } } if (range_parts[0] != 0) { return return_error(gcs_path, "The returned range '" + received_range + "' does not start at zero."); } *uploaded = range_parts[1] + 1; } return absl::OkStatus(); } absl::Status GcsFileSystem::ParseGcsPathForScheme(absl::string_view fname, string scheme, bool empty_object_ok, string* bucket, string* object) { absl::string_view parsed_scheme, bucketp, objectp; io::ParseURI(fname, &parsed_scheme, &bucketp, &objectp); if (parsed_scheme != scheme) { return errors::InvalidArgument("GCS path doesn't start with 'gs: fname); } *bucket = string(bucketp); if (bucket->empty() || *bucket == ".") { return errors::InvalidArgument("GCS path doesn't contain a bucket name: ", fname); } absl::ConsumePrefix(&objectp, "/"); 
*object = string(objectp); if (!empty_object_ok && object->empty()) { return errors::InvalidArgument("GCS path doesn't contain an object name: ", fname); } return absl::OkStatus(); } absl::Status GcsFileSystem::ParseGcsPath(absl::string_view fname, bool empty_object_ok, string* bucket, string* object) { return ParseGcsPathForScheme(fname, "gs", empty_object_ok, bucket, object); } void GcsFileSystem::ClearFileCaches(const string& fname) { tf_shared_lock l(block_cache_lock_); file_block_cache_->RemoveFile(fname); stat_cache_->Delete(fname); } absl::Status GcsFileSystem::NewWritableFile( const string& fname, TransactionToken* token, std::unique_ptr<WritableFile>* result) { string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); auto session_creator = [this](uint64 start_offset, const std::string& object_to_upload, const std::string& bucket, uint64 file_size, const std::string& gcs_path, UploadSessionHandle* session_handle) { return CreateNewUploadSession(start_offset, object_to_upload, bucket, file_size, gcs_path, session_handle); }; auto object_uploader = [this](const std::string& session_uri, uint64 start_offset, uint64 already_uploaded, const std::string& tmp_content_filename, uint64 file_size, const std::string& file_path) { return UploadToSession(session_uri, start_offset, already_uploaded, tmp_content_filename, file_size, file_path); }; auto status_poller = [this](const string& session_uri, uint64 file_size, const std::string& gcs_path, bool* completed, uint64* uploaded) { return RequestUploadSessionStatus(session_uri, file_size, gcs_path, completed, uploaded); }; auto generation_getter = [this](const string& fname, const string& bucket, const string& object, int64* generation) { GcsFileStat stat; TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( [&fname, &bucket, &object, &stat, this]() { return UncachedStatForObject(fname, bucket, object, &stat); }, retry_config_)); *generation = stat.generation_number; return 
absl::OkStatus(); }; result->reset(new GcsWritableFile( bucket, object, this, &timeouts_, [this, fname]() { ClearFileCaches(fname); }, retry_config_, compose_append_, session_creator, object_uploader, status_poller, generation_getter)); return absl::OkStatus(); } absl::Status GcsFileSystem::NewAppendableFile( const string& fname, TransactionToken* token, std::unique_ptr<WritableFile>* result) { std::unique_ptr<RandomAccessFile> reader; TF_RETURN_IF_ERROR(NewRandomAccessFile(fname, token, &reader)); std::unique_ptr<char[]> buffer(new char[kReadAppendableFileBufferSize]); absl::Status status; uint64 offset = 0; absl::string_view read_chunk; string old_content_filename; TF_RETURN_IF_ERROR(GetTmpFilename(&old_content_filename)); std::ofstream old_content(old_content_filename, std::ofstream::binary); while (true) { status = reader->Read(offset, kReadAppendableFileBufferSize, &read_chunk, buffer.get()); if (status.ok()) { old_content << read_chunk; offset += kReadAppendableFileBufferSize; } else if (status.code() == absl::StatusCode::kNotFound) { break; } else if (status.code() == absl::StatusCode::kOutOfRange) { old_content << read_chunk; break; } else { return status; } } old_content.close(); auto session_creator = [this](uint64 start_offset, const std::string& object_to_upload, const std::string& bucket, uint64 file_size, const std::string& gcs_path, UploadSessionHandle* session_handle) { return CreateNewUploadSession(start_offset, object_to_upload, bucket, file_size, gcs_path, session_handle); }; auto object_uploader = [this](const std::string& session_uri, uint64 start_offset, uint64 already_uploaded, const std::string& tmp_content_filename, uint64 file_size, const std::string& file_path) { return UploadToSession(session_uri, start_offset, already_uploaded, tmp_content_filename, file_size, file_path); }; auto status_poller = [this](const string& session_uri, uint64 file_size, const std::string& gcs_path, bool* completed, uint64* uploaded) { return 
RequestUploadSessionStatus(session_uri, file_size, gcs_path, completed, uploaded); }; auto generation_getter = [this](const string& fname, const string& bucket, const string& object, int64* generation) { GcsFileStat stat; TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( [&fname, &bucket, &object, &stat, this]() { return UncachedStatForObject(fname, bucket, object, &stat); }, retry_config_)); *generation = stat.generation_number; return absl::OkStatus(); }; string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); result->reset(new GcsWritableFile( bucket, object, this, old_content_filename, &timeouts_, [this, fname]() { ClearFileCaches(fname); }, retry_config_, compose_append_, session_creator, object_uploader, status_poller, generation_getter)); return absl::OkStatus(); } absl::Status GcsFileSystem::NewReadOnlyMemoryRegionFromFile( const string& fname, TransactionToken* token, std::unique_ptr<ReadOnlyMemoryRegion>* result) { uint64 size; TF_RETURN_IF_ERROR(GetFileSize(fname, token, &size)); std::unique_ptr<char[]> data(new char[size]); std::unique_ptr<RandomAccessFile> file; TF_RETURN_IF_ERROR(NewRandomAccessFile(fname, token, &file)); absl::string_view piece; TF_RETURN_IF_ERROR(file->Read(0, size, &piece, data.get())); result->reset(new GcsReadOnlyMemoryRegion(std::move(data), size)); return absl::OkStatus(); } absl::Status GcsFileSystem::FileExists(const string& fname, TransactionToken* token) { string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object)); if (object.empty()) { bool result; TF_RETURN_IF_ERROR(BucketExists(bucket, &result)); if (result) { return absl::OkStatus(); } else { return absl::NotFoundError( absl::StrCat("The specified bucket ", fname, " was not found.")); } } GcsFileStat stat; const absl::Status status = StatForObject(fname, bucket, object, &stat); if (!absl::IsNotFound(status)) { return status; } bool result; TF_RETURN_IF_ERROR(FolderExists(fname, &result)); if (result) { return 
absl::OkStatus(); } return errors::NotFound("The specified path ", fname, " was not found."); } absl::Status GcsFileSystem::ObjectExists(const string& fname, const string& bucket, const string& object, bool* result) { GcsFileStat stat; const absl::Status status = StatForObject(fname, bucket, object, &stat); switch (static_cast<int>(status.code())) { case static_cast<int>(error::Code::OK): *result = !stat.base.is_directory; return absl::OkStatus(); case static_cast<int>(error::Code::NOT_FOUND): *result = false; return absl::OkStatus(); default: return status; } } absl::Status GcsFileSystem::UncachedStatForObject(const string& fname, const string& bucket, const string& object, GcsFileStat* stat) { std::vector<char> output_buffer; std::unique_ptr<HttpRequest> request; TF_RETURN_WITH_CONTEXT_IF_ERROR(CreateHttpRequest(&request), " when reading metadata of gs: "/", object); request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket, "/o/", request->EscapeString(object), "?fields=size%2Cgeneration%2Cupdated")); request->SetResultBuffer(&output_buffer); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); if (stats_ != nullptr) { stats_->RecordStatObjectRequest(); } TF_RETURN_WITH_CONTEXT_IF_ERROR( request->Send(), " when reading metadata of gs: Json::Value root; TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root)); TF_RETURN_IF_ERROR(GetInt64Value(root, "size", &stat->base.length)); TF_RETURN_IF_ERROR( GetInt64Value(root, "generation", &stat->generation_number)); string updated; TF_RETURN_IF_ERROR(GetStringValue(root, "updated", &updated)); TF_RETURN_IF_ERROR(ParseRfc3339Time(updated, &(stat->base.mtime_nsec))); VLOG(1) << "Stat of: gs: << " length: " << stat->base.length << " generation: " << stat->generation_number << "; mtime_nsec: " << stat->base.mtime_nsec << "; updated: " << updated; if (absl::EndsWith(fname, "/")) { stat->base.is_directory = true; } else { stat->base.is_directory = false; } return absl::OkStatus(); } absl::Status 
GcsFileSystem::StatForObject(const string& fname, const string& bucket, const string& object, GcsFileStat* stat) { if (object.empty()) { return errors::InvalidArgument(strings::Printf( "'object' must be a non-empty string. (File: %s)", fname.c_str())); } TF_RETURN_IF_ERROR(stat_cache_->LookupOrCompute( fname, stat, [this, &bucket, &object](const string& fname, GcsFileStat* stat) { return UncachedStatForObject(fname, bucket, object, stat); })); return absl::OkStatus(); } absl::Status GcsFileSystem::BucketExists(const string& bucket, bool* result) { const absl::Status status = GetBucketMetadata(bucket, nullptr); switch (static_cast<absl::StatusCode>(status.code())) { case absl::StatusCode::kOk: *result = true; return absl::OkStatus(); case absl::StatusCode::kNotFound: *result = false; return absl::OkStatus(); default: return status; } } absl::Status GcsFileSystem::CheckBucketLocationConstraint( const string& bucket) { if (allowed_locations_.empty()) { return absl::OkStatus(); } if (allowed_locations_.erase(kDetectZoneSentinelValue) == 1) { string zone; TF_RETURN_IF_ERROR(zone_provider_->GetZone(&zone)); allowed_locations_.insert(ZoneToRegion(&zone)); } string location; TF_RETURN_IF_ERROR(GetBucketLocation(bucket, &location)); if (allowed_locations_.find(location) != allowed_locations_.end()) { return absl::OkStatus(); } return errors::FailedPrecondition(strings::Printf( "Bucket '%s' is in '%s' location, allowed locations are: (%s).", bucket.c_str(), location.c_str(), absl::StrJoin(allowed_locations_, ", ").c_str())); } absl::Status GcsFileSystem::GetBucketLocation(const string& bucket, string* location) { auto compute_func = [this](const string& bucket, string* location) { std::vector<char> result_buffer; absl::Status status = GetBucketMetadata(bucket, &result_buffer); Json::Value result; TF_RETURN_IF_ERROR(ParseJson(result_buffer, &result)); string bucket_location; TF_RETURN_IF_ERROR( GetStringValue(result, kBucketMetadataLocationKey, &bucket_location)); *location = 
absl::AsciiStrToLower(bucket_location); return absl::OkStatus(); }; TF_RETURN_IF_ERROR( bucket_location_cache_->LookupOrCompute(bucket, location, compute_func)); return absl::OkStatus(); } absl::Status GcsFileSystem::GetBucketMetadata( const string& bucket, std::vector<char>* result_buffer) { std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket)); if (result_buffer != nullptr) { request->SetResultBuffer(result_buffer); } request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); return request->Send(); } absl::Status GcsFileSystem::FolderExists(const string& dirname, bool* result) { StatCache::ComputeFunc compute_func = [this](const string& dirname, GcsFileStat* stat) { std::vector<string> children; TF_RETURN_IF_ERROR( GetChildrenBounded(dirname, 1, &children, true , true )); if (!children.empty()) { stat->base = DIRECTORY_STAT; return absl::OkStatus(); } else { return errors::InvalidArgument("Not a directory!"); } }; GcsFileStat stat; absl::Status s = stat_cache_->LookupOrCompute(MaybeAppendSlash(dirname), &stat, compute_func); if (s.ok()) { *result = stat.base.is_directory; return absl::OkStatus(); } if (absl::IsInvalidArgument(s)) { *result = false; return absl::OkStatus(); } return s; } absl::Status GcsFileSystem::GetChildren(const string& dirname, TransactionToken* token, std::vector<string>* result) { return GetChildrenBounded(dirname, UINT64_MAX, result, false , false ); } absl::Status GcsFileSystem::GetMatchingPaths(const string& pattern, TransactionToken* token, std::vector<string>* results) { MatchingPathsCache::ComputeFunc compute_func = [this](const string& pattern, std::vector<string>* results) { results->clear(); const string& fixed_prefix = pattern.substr(0, pattern.find_first_of("*?[\\")); const string dir(this->Dirname(fixed_prefix)); if (dir.empty()) { return errors::InvalidArgument( "A GCS pattern doesn't have a bucket name: ", pattern); 
} std::vector<string> all_files; TF_RETURN_IF_ERROR(GetChildrenBounded( dir, UINT64_MAX, &all_files, true , false )); const auto& files_and_folders = AddAllSubpaths(all_files); const absl::string_view dir_no_slash = absl::StripSuffix(dir, "/"); for (const auto& path : files_and_folders) { const string full_path = strings::StrCat(dir_no_slash, "/", path); if (this->Match(full_path, pattern)) { results->push_back(full_path); } } return absl::OkStatus(); }; TF_RETURN_IF_ERROR( matching_paths_cache_->LookupOrCompute(pattern, results, compute_func)); return absl::OkStatus(); } absl::Status GcsFileSystem::GetChildrenBounded( const string& dirname, uint64 max_results, std::vector<string>* result, bool recursive, bool include_self_directory_marker) { if (!result) { return errors::InvalidArgument("'result' cannot be null"); } string bucket, object_prefix; TF_RETURN_IF_ERROR( ParseGcsPath(MaybeAppendSlash(dirname), true, &bucket, &object_prefix)); string nextPageToken; uint64 retrieved_results = 0; while (true) { std::vector<char> output_buffer; std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); auto uri = strings::StrCat(kGcsUriBase, "b/", bucket, "/o"); if (recursive) { uri = strings::StrCat(uri, "?fields=items%2Fname%2CnextPageToken"); } else { uri = strings::StrCat(uri, "?fields=items%2Fname%2Cprefixes%2CnextPageToken"); uri = strings::StrCat(uri, "&delimiter=%2F"); } if (!object_prefix.empty()) { uri = strings::StrCat(uri, "&prefix=", request->EscapeString(object_prefix)); } if (!nextPageToken.empty()) { uri = strings::StrCat( uri, "&pageToken=", request->EscapeString(nextPageToken)); } if (max_results - retrieved_results < kGetChildrenDefaultPageSize) { uri = strings::StrCat(uri, "&maxResults=", max_results - retrieved_results); } request->SetUri(uri); request->SetResultBuffer(&output_buffer); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading 
", dirname); Json::Value root; TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root)); const auto items = root.get("items", Json::Value::null); if (!items.isNull()) { if (!items.isArray()) { return errors::Internal( "Expected an array 'items' in the GCS response."); } for (size_t i = 0; i < items.size(); i++) { const auto item = items.get(i, Json::Value::null); if (!item.isObject()) { return errors::Internal( "Unexpected JSON format: 'items' should be a list of objects."); } string name; TF_RETURN_IF_ERROR(GetStringValue(item, "name", &name)); absl::string_view relative_path(name); if (!absl::ConsumePrefix(&relative_path, object_prefix)) { return errors::Internal(strings::StrCat( "Unexpected response: the returned file name ", name, " doesn't match the prefix ", object_prefix)); } if (!relative_path.empty() || include_self_directory_marker) { result->emplace_back(relative_path); } if (++retrieved_results >= max_results) { return absl::OkStatus(); } } } const auto prefixes = root.get("prefixes", Json::Value::null); if (!prefixes.isNull()) { if (!prefixes.isArray()) { return errors::Internal( "'prefixes' was expected to be an array in the GCS response."); } for (size_t i = 0; i < prefixes.size(); i++) { const auto prefix = prefixes.get(i, Json::Value::null); if (prefix.isNull() || !prefix.isString()) { return errors::Internal( "'prefixes' was expected to be an array of strings in the GCS " "response."); } const string& prefix_str = prefix.asString(); absl::string_view relative_path(prefix_str); if (!absl::ConsumePrefix(&relative_path, object_prefix)) { return errors::Internal( "Unexpected response: the returned folder name ", prefix_str, " doesn't match the prefix ", object_prefix); } result->emplace_back(relative_path); if (++retrieved_results >= max_results) { return absl::OkStatus(); } } } const auto token = root.get("nextPageToken", Json::Value::null); if (token.isNull()) { return absl::OkStatus(); } if (!token.isString()) { return errors::Internal( "Unexpected 
response: nextPageToken is not a string"); } nextPageToken = token.asString(); } } absl::Status GcsFileSystem::Stat(const string& fname, TransactionToken* token, FileStatistics* stat) { if (!stat) { return errors::Internal("'stat' cannot be nullptr."); } string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object)); if (object.empty()) { bool is_bucket; TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket)); if (is_bucket) { *stat = DIRECTORY_STAT; return absl::OkStatus(); } return errors::NotFound("The specified bucket ", fname, " was not found."); } GcsFileStat gcs_stat; const absl::Status status = StatForObject(fname, bucket, object, &gcs_stat); if (status.ok()) { *stat = gcs_stat.base; return absl::OkStatus(); } if (!absl::IsNotFound(status)) { return status; } bool is_folder; TF_RETURN_IF_ERROR(FolderExists(fname, &is_folder)); if (is_folder) { *stat = DIRECTORY_STAT; return absl::OkStatus(); } return errors::NotFound("The specified path ", fname, " was not found."); } absl::Status GcsFileSystem::DeleteFile(const string& fname, TransactionToken* token) { string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket, "/o/", request->EscapeString(object))); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); request->SetDeleteRequest(); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when deleting ", fname); ClearFileCaches(fname); return absl::OkStatus(); } absl::Status GcsFileSystem::CreateDir(const string& dirname, TransactionToken* token) { string dirname_with_slash = MaybeAppendSlash(dirname); VLOG(3) << "CreateDir: creating directory with dirname: " << dirname << " and dirname_with_slash: " << dirname_with_slash; string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(dirname_with_slash, true, &bucket, &object)); if (object.empty()) { 
bool is_bucket; TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket)); return is_bucket ? absl::OkStatus() : errors::NotFound("The specified bucket ", dirname_with_slash, " was not found."); } if (FileExists(dirname_with_slash, token).ok()) { VLOG(3) << "CreateDir: directory already exists, not uploading " << dirname; return errors::AlreadyExists(dirname); } std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(strings::StrCat( kGcsUploadUriBase, "b/", bucket, "/o?uploadType=media&name=", request->EscapeString(object), "&ifGenerationMatch=0")); request->SetPostEmptyBody(); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); const absl::Status& status = request->Send(); if (status.ok()) { VLOG(3) << "CreateDir: finished uploading directory " << dirname; return absl::OkStatus(); } if (request->GetResponseCode() != HTTP_CODE_PRECONDITION_FAILED) { TF_RETURN_WITH_CONTEXT_IF_ERROR(status, " when uploading ", dirname_with_slash); } VLOG(3) << "Ignoring directory already exists on object " << dirname_with_slash; return errors::AlreadyExists(dirname); } absl::Status GcsFileSystem::DeleteDir(const string& dirname, TransactionToken* token) { std::vector<string> children; TF_RETURN_IF_ERROR( GetChildrenBounded(dirname, 2, &children, true , true )); if (children.size() > 1 || (children.size() == 1 && !children[0].empty())) { return errors::FailedPrecondition("Cannot delete a non-empty directory."); } if (children.size() == 1 && children[0].empty()) { return DeleteFile(MaybeAppendSlash(dirname), token); } return absl::OkStatus(); } absl::Status GcsFileSystem::GetFileSize(const string& fname, TransactionToken* token, uint64* file_size) { if (!file_size) { return errors::Internal("'file_size' cannot be nullptr."); } string bucket, object; TF_RETURN_IF_ERROR(ParseGcsPath(fname, false, &bucket, &object)); FileStatistics stat; TF_RETURN_IF_ERROR(Stat(fname, token, &stat)); *file_size = stat.length; return 
absl::OkStatus(); } absl::Status GcsFileSystem::RenameFile(const string& src, const string& target, TransactionToken* token) { if (!IsDirectory(src, token).ok()) { return RenameObject(src, target); } std::vector<string> children; TF_RETURN_IF_ERROR( GetChildrenBounded(src, UINT64_MAX, &children, true , true )); for (const string& subpath : children) { TF_RETURN_IF_ERROR( RenameObject(JoinGcsPath(src, subpath), JoinGcsPath(target, subpath))); } return absl::OkStatus(); } absl::Status GcsFileSystem::RenameObject(const string& src, const string& target) { VLOG(3) << "RenameObject: started gs: string src_bucket, src_object, target_bucket, target_object; TF_RETURN_IF_ERROR(ParseGcsPath(src, false, &src_bucket, &src_object)); TF_RETURN_IF_ERROR( ParseGcsPath(target, false, &target_bucket, &target_object)); std::unique_ptr<HttpRequest> request; TF_RETURN_IF_ERROR(CreateHttpRequest(&request)); request->SetUri(strings::StrCat(kGcsUriBase, "b/", src_bucket, "/o/", request->EscapeString(src_object), "/rewriteTo/b/", target_bucket, "/o/", request->EscapeString(target_object))); request->SetPostEmptyBody(); request->SetTimeouts(timeouts_.connect, timeouts_.idle, timeouts_.metadata); std::vector<char> output_buffer; request->SetResultBuffer(&output_buffer); TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when renaming ", src, " to ", target); ClearFileCaches(target); Json::Value root; TF_RETURN_IF_ERROR(ParseJson(output_buffer, &root)); bool done; TF_RETURN_IF_ERROR(GetBoolValue(root, "done", &done)); if (!done) { return errors::Unimplemented( "Couldn't rename ", src, " to ", target, ": moving large files between buckets with different " "locations or storage classes is not supported."); } VLOG(3) << "RenameObject: finished from: gs: return RetryingUtils::DeleteWithRetries( [this, &src]() { return DeleteFile(src, nullptr); }, retry_config_); } absl::Status GcsFileSystem::IsDirectory(const string& fname, TransactionToken* token) { string bucket, object; 
TF_RETURN_IF_ERROR(ParseGcsPath(fname, true, &bucket, &object)); if (object.empty()) { bool is_bucket; TF_RETURN_IF_ERROR(BucketExists(bucket, &is_bucket)); if (is_bucket) { return absl::OkStatus(); } return errors::NotFound("The specified bucket gs: " was not found."); } bool is_folder; TF_RETURN_IF_ERROR(FolderExists(fname, &is_folder)); if (is_folder) { return absl::OkStatus(); } bool is_object; TF_RETURN_IF_ERROR(ObjectExists(fname, bucket, object, &is_object)); if (is_object) { return errors::FailedPrecondition("The specified path ", fname, " is not a directory."); } return errors::NotFound("The specified path ", fname, " was not found."); } absl::Status GcsFileSystem::DeleteRecursively(const string& dirname, TransactionToken* token, int64_t* undeleted_files, int64_t* undeleted_dirs) { if (!undeleted_files || !undeleted_dirs) { return errors::Internal( "'undeleted_files' and 'undeleted_dirs' cannot be nullptr."); } *undeleted_files = 0; *undeleted_dirs = 0; if (!IsDirectory(dirname, token).ok()) { *undeleted_dirs = 1; return absl::Status( absl::StatusCode::kNotFound, strings::StrCat(dirname, " doesn't exist or not a directory.")); } std::vector<string> all_objects; TF_RETURN_IF_ERROR(GetChildrenBounded( dirname, UINT64_MAX, &all_objects, true , true )); for (const string& object : all_objects) { const string& full_path = JoinGcsPath(dirname, object); const auto& delete_file_status = RetryingUtils::DeleteWithRetries( [this, &full_path, token]() { return DeleteFile(full_path, token); }, retry_config_); if (!delete_file_status.ok()) { if (IsDirectory(full_path, token).ok()) { (*undeleted_dirs)++; } else { (*undeleted_files)++; } } } return absl::OkStatus(); } void GcsFileSystem::FlushCaches(TransactionToken* token) { tf_shared_lock l(block_cache_lock_); file_block_cache_->Flush(); stat_cache_->Clear(); matching_paths_cache_->Clear(); bucket_location_cache_->Clear(); } void GcsFileSystem::SetStats(GcsStatsInterface* stats) { CHECK(stats_ == nullptr) << "SetStats() 
has already been called."; CHECK(stats != nullptr); mutex_lock l(block_cache_lock_); stats_ = stats; stats_->Configure(this, &throttle_, file_block_cache_.get()); } void GcsFileSystem::SetCacheStats(FileBlockCacheStatsInterface* cache_stats) { tf_shared_lock l(block_cache_lock_); if (file_block_cache_ == nullptr) { LOG(ERROR) << "Tried to set cache stats of non-initialized file block " "cache object. This may result in not exporting the intended " "monitoring data"; return; } file_block_cache_->SetStats(cache_stats); } void GcsFileSystem::SetAuthProvider( std::unique_ptr<AuthProvider> auth_provider) { mutex_lock l(mu_); auth_provider_ = std::move(auth_provider); } absl::Status GcsFileSystem::CreateHttpRequest( std::unique_ptr<HttpRequest>* request) { std::unique_ptr<HttpRequest> new_request{http_request_factory_->Create()}; if (dns_cache_) { dns_cache_->AnnotateRequest(new_request.get()); } string auth_token; { tf_shared_lock l(mu_); TF_RETURN_IF_ERROR( AuthProvider::GetToken(auth_provider_.get(), &auth_token)); } new_request->AddAuthBearerHeader(auth_token); if (additional_header_) { new_request->AddHeader(additional_header_->first, additional_header_->second); } if (stats_ != nullptr) { new_request->SetRequestStats(stats_->HttpStats()); } if (!throttle_.AdmitRequest()) { return errors::Unavailable("Request throttled"); } *request = std::move(new_request); return absl::OkStatus(); } RetryingGcsFileSystem::RetryingGcsFileSystem() : RetryingFileSystem(std::make_unique<GcsFileSystem>(), RetryConfig(GetGcsRetryConfig())) {} } REGISTER_LEGACY_FILE_SYSTEM("gs", ::tsl::RetryingGcsFileSystem);
#include "tsl/platform/cloud/gcs_file_system.h" #include <fstream> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/cloud/http_request_fake.h" #include "tsl/platform/errors.h" #include "tsl/platform/str_util.h" #include "tsl/platform/strcat.h" #include "tsl/platform/test.h" #ifdef PLATFORM_WINDOWS #undef DeleteFile #endif namespace tsl { namespace { static GcsFileSystem::TimeoutConfig kTestTimeoutConfig(5, 1, 10, 20, 30); static RetryConfig kTestRetryConfig(0 ); static std::unordered_set<string>* kAllowedLocationsDefault = new std::unordered_set<string>(); static std::unordered_set<string>* kAllowedLocationsAuto = new std::unordered_set<string>({"auto"}); class FakeAuthProvider : public AuthProvider { public: absl::Status GetToken(string* token) override { *token = "fake_token"; return absl::OkStatus(); } }; class FakeZoneProvider : public ZoneProvider { public: absl::Status GetZone(string* zone) override { *zone = "us-east1-b"; return absl::OkStatus(); } }; TEST(GcsFileSystemTest, NewRandomAccessFile_NoBlockCache) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-5\n" "Timeouts: 5 1 20\n", "012345"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 6-11\n" "Timeouts: 5 1 20\n", "6789")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[6]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("012345", result); EXPECT_TRUE(errors::IsOutOfRange( file->Read(sizeof(scratch), 
sizeof(scratch), &result, scratch))); EXPECT_EQ("6789", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered) { std::vector<HttpRequest*> requests({ new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "0123456789"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 10-19\n" "Timeouts: 5 1 20\n", ""), }); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[6]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("012345", result); EXPECT_TRUE(errors::IsOutOfRange( file->Read(sizeof(scratch), sizeof(scratch), &result, scratch))); EXPECT_EQ("6789", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_Errors) { std::vector<HttpRequest*> requests({ new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "Server Not", errors::Unavailable("important HTTP error 308"), nullptr, {}, 308), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 6-15\n" "Timeouts: 5 1 20\n", "123"), }); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); 
EXPECT_EQ(filename, "gs: char scratch[6]; absl::string_view result; EXPECT_TRUE( errors::IsUnavailable(file->Read(0, sizeof(scratch), &result, scratch))); EXPECT_EQ("", result); EXPECT_TRUE(errors::IsOutOfRange( file->Read(sizeof(scratch), sizeof(scratch), &result, scratch))); EXPECT_EQ("123", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_ReadAtEOF) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "0123456789"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 10-19\n" "Timeouts: 5 1 20\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[10]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("0123456789", result); EXPECT_TRUE(errors::IsOutOfRange( file->Read(sizeof(scratch), sizeof(scratch), &result, scratch))); EXPECT_EQ("", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_CachedOutOfRange) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "012345678")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( 
fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[5]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("01234", result); TF_EXPECT_OK(file->Read(4, sizeof(scratch), &result, scratch)); EXPECT_EQ("45678", result); EXPECT_TRUE( errors::IsOutOfRange(file->Read(5, sizeof(scratch), &result, scratch))); EXPECT_EQ("5678", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_CachedNotSequential) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 1-10\n" "Timeouts: 5 1 20\n", "12345678"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "012345678")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[5]; absl::string_view result; TF_EXPECT_OK(file->Read(1, sizeof(scratch), &result, scratch)); EXPECT_EQ("12345", result); TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("01234", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_Growing) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "012345678"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 9-18\n" "Timeouts: 5 1 20\n", "9")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), 
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[10]; absl::string_view result; EXPECT_TRUE( errors::IsOutOfRange(file->Read(0, sizeof(scratch), &result, scratch))); EXPECT_EQ("012345678", result); TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("0123456789", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_Buffered_ReadBackwards) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 5-14\n" "Timeouts: 5 1 20\n", "56789"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-9\n" "Timeouts: 5 1 20\n", "0123456789")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 10 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: absl::string_view filename; TF_EXPECT_OK(file->Name(&filename)); EXPECT_EQ(filename, "gs: char scratch[10]; absl::string_view result; EXPECT_TRUE( errors::IsOutOfRange(file->Read(5, sizeof(scratch), &result, scratch))); EXPECT_EQ("56789", result); TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("0123456789", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_WithLocationConstraintInSameLocation) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", R"( { "location":"US-EAST1" })")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), 
std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsAuto, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: } TEST(GcsFileSystemTest, NewRandomAccessFile_WithLocationConstraintCaching) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", R"( { "location":"US-EAST1" })"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", R"( { "location":"US-EAST1" })"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", R"( { "location":"US-EAST1" })")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsAuto, nullptr , false ); std::unique_ptr<RandomAccessFile> file; string bucket = "gs: string another_bucket = "gs: TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file)); TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file)); TF_EXPECT_OK(fs.NewRandomAccessFile(another_bucket, nullptr, &file)); TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file)); TF_EXPECT_OK(fs.NewRandomAccessFile(another_bucket, nullptr, &file)); fs.FlushCaches(nullptr); TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file)); } TEST(GcsFileSystemTest, NewRandomAccessFile_WithLocationConstraintInDifferentLocation) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", R"( { "location":"BARFOO" })")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new 
FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsAuto, nullptr , false ); std::unique_ptr<RandomAccessFile> file; EXPECT_EQ( errors::FailedPrecondition( "Bucket 'bucket' is in 'barfoo' location, allowed locations " "are: (us-east1)."), fs.NewRandomAccessFile("gs: } TEST(GcsFileSystemTest, NewRandomAccessFile_NoBlockCache_DifferentN) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-2\n" "Timeouts: 5 1 20\n", "012"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 3-12\n" "Timeouts: 5 1 20\n", "3456789")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char small_scratch[3]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(small_scratch), &result, small_scratch)); EXPECT_EQ("012", result); char large_scratch[10]; EXPECT_TRUE(errors::IsOutOfRange(file->Read( sizeof(small_scratch), sizeof(large_scratch), &result, large_scratch))); EXPECT_EQ("3456789", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"15\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-8\n" "Timeouts: 5 1 20\n", "012345678"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 9-17\n" "Timeouts: 5 1 20\n", 
"9abcde"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 18-26\n" "Timeouts: 5 1 20\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 , 18 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); char scratch[100]; absl::string_view result; { std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK(fs.NewRandomAccessFile("gs: nullptr, &file)); scratch[5] = 'x'; TF_EXPECT_OK(file->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); EXPECT_EQ(scratch[5], 'x'); TF_EXPECT_OK(file->Read(4, 4, &result, scratch)); EXPECT_EQ("4567", result); TF_EXPECT_OK(file->Read(6, 5, &result, scratch)); EXPECT_EQ("6789a", result); EXPECT_TRUE(errors::IsOutOfRange(file->Read(6, 10, &result, scratch))); EXPECT_EQ("6789abcde", result); EXPECT_TRUE(errors::IsOutOfRange(file->Read(20, 10, &result, scratch))); EXPECT_TRUE(result.empty()); TF_EXPECT_OK(file->Read(0, 4, &result, scratch)); } EXPECT_EQ("0123", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_Flush) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"15\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-8\n" "Timeouts: 5 1 20\n", "012345678"), new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"15\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-8\n" "Timeouts: 5 1 20\n", "012345678")}); GcsFileSystem fs( 
std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 , 18 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); char scratch[100]; absl::string_view result; std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: scratch[5] = 'x'; TF_EXPECT_OK(file->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); EXPECT_EQ(scratch[5], 'x'); fs.FlushCaches(nullptr); TF_EXPECT_OK(file->Read(4, 4, &result, scratch)); EXPECT_EQ("4567", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_MaxStaleness) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "object?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"16\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Range: 0-7\n" "Timeouts: 5 1 20\n", "01234567"), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Range: 8-15\n" "Timeouts: 5 1 20\n", "89abcdef")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 , 16 , 3600 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); char scratch[100]; absl::string_view result; for (int i = 0; i < 10; i++) { std::unique_ptr<RandomAccessFile> file1; std::unique_ptr<RandomAccessFile> file2; TF_EXPECT_OK(fs.NewRandomAccessFile("gs: TF_EXPECT_OK(fs.NewRandomAccessFile("gs: TF_EXPECT_OK(file1->Read(0, 8, &result, scratch)); EXPECT_EQ("01234567", result); TF_EXPECT_OK(file2->Read(0, 8, &result, scratch)); EXPECT_EQ("01234567", result); TF_EXPECT_OK(file2->Read(8, 8, &result, 
scratch)); EXPECT_EQ("89abcdef", result); TF_EXPECT_OK(file1->Read(8, 8, &result, scratch)); EXPECT_EQ("89abcdef", result); } } TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_FileSignatureChanges) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"5\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-8\n" "Timeouts: 5 1 20\n", "01234"), new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"5\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-8\n" "Timeouts: 5 1 20\n", "43210")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 9 , 18 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char scratch[5]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("01234", result); TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("43210", result); } TEST(GcsFileSystemTest, NewRandomAccessFile_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); 
std::unique_ptr<RandomAccessFile> file; EXPECT_TRUE(errors::IsInvalidArgument( fs.NewRandomAccessFile("gs: } TEST(GcsFileSystemTest, NewRandomAccessFile_InconsistentRead) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "random_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"6\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-5\n" "Timeouts: 5 1 20\n", "012")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 1e3 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; TF_ASSERT_OK(fs.Stat("gs: std::unique_ptr<RandomAccessFile> file; TF_ASSERT_OK( fs.NewRandomAccessFile("gs: char scratch[6]; absl::string_view result; EXPECT_TRUE( errors::IsInternal(file->Read(0, sizeof(scratch), &result, scratch))); } TEST(GcsFileSystemTest, NewWritableFile) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"16\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-7\n" "Timeouts: 5 1 20\n", "01234567"), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", ""), new FakeHttpRequest( "Uri: https: 
"path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"33\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:15:34.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-7\n" "Timeouts: 5 1 20\n", "01234567")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 , 8 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> rfile; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char scratch[100]; absl::string_view result; TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); std::unique_ptr<WritableFile> wfile; TF_EXPECT_OK( fs.NewWritableFile("gs: TF_EXPECT_OK(wfile->Append("content1,")); int64_t pos; TF_EXPECT_OK(wfile->Tell(&pos)); EXPECT_EQ(9, pos); TF_EXPECT_OK(wfile->Append("content2")); TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(wfile->Sync()); TF_EXPECT_OK(wfile->Close()); } TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadSucceeds) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable.txt\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header Content-Range: bytes */17\n" "Put: yes\n", "", errors::Unavailable("308"), nullptr, {{"Range", "0-10"}}, 308), new 
FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 11-16/17\n" "Timeouts: 5 1 30\n" "Put body: ntent2\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header Content-Range: bytes */17\n" "Put: yes\n", "", errors::Unavailable("308"), nullptr, {{"Range", "bytes=0-12"}}, 308), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 13-16/17\n" "Timeouts: 5 1 30\n" "Put body: ent2\n", "", errors::Unavailable("308"), 308), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header Content-Range: bytes */17\n" "Put: yes\n", "", errors::Unavailable("308"), nullptr, {{"Range", "bytes=0-14"}}, 308), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 15-16/17\n" "Timeouts: 5 1 30\n" "Put body: t2\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> file; TF_EXPECT_OK( fs.NewWritableFile("gs: TF_EXPECT_OK(file->Append("content1,")); TF_EXPECT_OK(file->Append("content2")); TF_EXPECT_OK(file->Close()); } TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadSucceedsOnGetStatus) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"16\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-7\n" "Timeouts: 5 1 20\n", "01234567"), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable\n" "Auth Token: fake_token\n" 
"Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header Content-Range: bytes */17\n" "Put: yes\n", "", absl::OkStatus(), nullptr, {}, 201), new FakeHttpRequest( "Uri: https: "path%2Fwriteable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"33\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:19:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-7\n" "Timeouts: 5 1 20\n", "01234567")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 8 , 8 , 3600 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<RandomAccessFile> rfile; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char scratch[100]; absl::string_view result; TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); std::unique_ptr<WritableFile> wfile; TF_EXPECT_OK( fs.NewWritableFile("gs: TF_EXPECT_OK(wfile->Append("content1,")); TF_EXPECT_OK(wfile->Append("content2")); TF_EXPECT_OK(rfile->Read(4, 4, &result, scratch)); EXPECT_EQ("4567", result); TF_EXPECT_OK(wfile->Close()); TF_EXPECT_OK(rfile->Read(0, 8, &result, scratch)); EXPECT_EQ("01234567", result); } TEST(GcsFileSystemTest, NewWritableFile_ResumeUploadAllAttemptsFail) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable.txt\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", 
"", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "", errors::Unavailable("503"), 503)}); for (int i = 0; i < 10; i++) { requests.emplace_back( new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header Content-Range: bytes */17\n" "Put: yes\n", "", errors::Unavailable("important HTTP error 308"), nullptr, {{"Range", "0-10"}}, 308)); requests.emplace_back(new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 11-16/17\n" "Timeouts: 5 1 30\n" "Put body: ntent2\n", "", errors::Unavailable("important HTTP error 503"), 503)); } requests.emplace_back(new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable.txt\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: requests.emplace_back( new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "")); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , RetryConfig(2 ), kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> file; TF_EXPECT_OK( fs.NewWritableFile("gs: TF_EXPECT_OK(file->Append("content1,")); TF_EXPECT_OK(file->Append("content2")); const auto& status = file->Close(); EXPECT_TRUE(errors::IsAborted(status)); EXPECT_TRUE( absl::StrContains(status.message(), "All 10 retry attempts failed. 
The last failure: " "important HTTP error 503")) << status; } TEST(GcsFileSystemTest, NewWritableFile_UploadReturns410) { std::vector<string> results; TF_EXPECT_OK( Env::Default()->GetMatchingPaths("/tmp/tmp_file_tensorflow*", &results)); const int64_t tmp_files_before = results.size(); std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable.txt\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "", errors::NotFound("important HTTP error 410"), 410), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fwriteable.txt\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); { std::unique_ptr<WritableFile> file; TF_EXPECT_OK( fs.NewWritableFile("gs: TF_EXPECT_OK(file->Append("content1,")); TF_EXPECT_OK(file->Append("content2")); const auto& status = file->Close(); EXPECT_TRUE(errors::IsUnavailable(status)); EXPECT_TRUE( absl::StrContains(status.message(), "Upload to gs: "caused by: important HTTP error 410")) << status; EXPECT_TRUE(absl::StrContains( status.message(), "when uploading gs: << status; } results.clear(); TF_EXPECT_OK( Env::Default()->GetMatchingPaths("/tmp/tmp_file_tensorflow*", &results)); 
EXPECT_EQ(tmp_files_before, results.size()); } TEST(GcsFileSystemTest, NewWritableFile_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> file; EXPECT_TRUE(errors::IsInvalidArgument( fs.NewWritableFile("gs: } TEST(GcsFileSystemTest, NewAppendableFile) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-1048575\n" "Timeouts: 5 1 20\n", "content1,"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-31\n" "Timeouts: 5 1 20\n", "content1,"), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fappendable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 17\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-16/17\n" "Timeouts: 5 1 30\n" "Put body: content1,content2\n", ""), new FakeHttpRequest( "Uri: https: "path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:25:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-31\n" "Timeouts: 5 1 20\n", "01234567")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), 
std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 , 32 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> wfile; TF_EXPECT_OK( fs.NewAppendableFile("gs: TF_EXPECT_OK(wfile->Append("content2")); std::unique_ptr<RandomAccessFile> rfile; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char scratch[100]; absl::string_view result; TF_EXPECT_OK(rfile->Read(0, 8, &result, scratch)); EXPECT_EQ("content1", result); TF_EXPECT_OK(wfile->Close()); TF_EXPECT_OK(rfile->Read(0, 4, &result, scratch)); EXPECT_EQ("0123", result); } TEST(GcsFileSystemTest, NewAppendableFile_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> file; EXPECT_TRUE(errors::IsInvalidArgument( fs.NewAppendableFile("gs: } TEST(GcsFileSystemTest, NewAppendableFile_ObjectDoesNotExist) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-1048575\n" "Timeouts: 5 1 20\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "?uploadType=resumable&name=filename\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 0\n" "Post: yes\n" "Timeouts: 5 1 10\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> file; TF_EXPECT_OK(fs.NewAppendableFile("gs: } TEST(GcsFileSystemTest, NewReadOnlyMemoryRegionFromFile) { const 
string content = "file content"; std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Frandom_access.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"", content.size(), "\"", ", \"generation\": \"1\"", ", \"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( strings::StrCat("Uri: https: "path%2Frandom_access.txt\n" "Auth Token: fake_token\n" "Range: 0-", content.size() - 1, "\n", "Timeouts: 5 1 20\n"), content)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<ReadOnlyMemoryRegion> region; TF_EXPECT_OK(fs.NewReadOnlyMemoryRegionFromFile( "gs: EXPECT_EQ(content, absl::string_view(reinterpret_cast<const char*>(region->data()), region->length())); } TEST(GcsFileSystemTest, NewReadOnlyMemoryRegionFromFile_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<ReadOnlyMemoryRegion> region; EXPECT_TRUE(errors::IsInvalidArgument( fs.NewReadOnlyMemoryRegionFromFile("gs: } TEST(GcsFileSystemTest, FileExists_YesAsObject) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new 
FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.FileExists("gs: } TEST(GcsFileSystemTest, FileExists_YesAsFolder) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Fsubfolder?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/subfolder/\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.FileExists("gs: } TEST(GcsFileSystemTest, FileExists_YesAsBucket) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"size\": \"100\"}"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"size\": \"100\"}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.FileExists("gs: TF_EXPECT_OK(fs.FileExists("gs: } TEST(GcsFileSystemTest, FileExists_NotAsObjectOrFolder) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: 
"path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Ffile1.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": []}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_TRUE( errors::IsNotFound(fs.FileExists("gs: } TEST(GcsFileSystemTest, FileExists_NotAsBucket) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_TRUE(absl::IsNotFound(fs.FileExists("gs: EXPECT_TRUE(absl::IsNotFound(fs.FileExists("gs: } TEST(GcsFileSystemTest, FileExists_StatCache) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "path%2Fsubfolder%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/subfolder/\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); for (int i = 0; i < 10; i++) { TF_EXPECT_OK(fs.FileExists("gs: TF_EXPECT_OK(fs.FileExists("gs: } } TEST(GcsFileSystemTest, FileExists_DirectoryMark) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "dir%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"5\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.FileExists("gs: TF_EXPECT_OK(fs.IsDirectory("gs: } TEST(GcsFileSystemTest, GetChildren_NoItems) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix=" "path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"prefixes\": [\"path/subpath/\"]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: 
EXPECT_EQ(std::vector<string>({"subpath/"}), children); } TEST(GcsFileSystemTest, GetChildren_ThreeFiles) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix=" "path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/file3.txt\" }]," "\"prefixes\": [\"path/subpath/\"]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/"}), children); } TEST(GcsFileSystemTest, GetChildren_SelfDirectoryMarker) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix=" "path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/\" }," " { \"name\": \"path/file3.txt\" }]," "\"prefixes\": [\"path/subpath/\"]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(std::vector<string>({"file3.txt", "subpath/"}), children); } TEST(GcsFileSystemTest, GetChildren_ThreeFiles_NoSlash) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix=" "path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", 
"{\"items\": [ " " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/file3.txt\" }]," "\"prefixes\": [\"path/subpath/\"]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/"}), children); } TEST(GcsFileSystemTest, GetChildren_Root) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(0, children.size()); } TEST(GcsFileSystemTest, GetChildren_Empty) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&prefix=" "path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(0, children.size()); } TEST(GcsFileSystemTest, GetChildren_Pagination) { std::vector<HttpRequest*> requests( 
{new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&" "prefix=path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"nextPageToken\": \"ABCD==\", " "\"items\": [ " " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/file3.txt\" }]," "\"prefixes\": [\"path/subpath/\"]}"), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2Cprefixes%2CnextPageToken&delimiter=%2F&" "prefix=path%2F" "&pageToken=ABCD==\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file4.txt\" }," " { \"name\": \"path/file5.txt\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> children; TF_EXPECT_OK(fs.GetChildren("gs: EXPECT_EQ(std::vector<string>({"file1.txt", "file3.txt", "subpath/", "file4.txt", "file5.txt"}), children); } TEST(GcsFileSystemTest, GetMatchingPaths_NoWildcard) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubpath%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/subpath/file2.txt\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: nullptr, &result)); EXPECT_EQ(std::vector<string>({"gs: result); } TEST(GcsFileSystemTest, GetMatchingPaths_BucketAndWildcard) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/subpath/file2.txt\" }," " { \"name\": \"path/file3.txt\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK( fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; TF_EXPECT_OK( fs.GetMatchingPaths("gs: 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::vector<string> result; EXPECT_TRUE(errors::IsInvalidArgument( fs.GetMatchingPaths("gs:/, 0 , 0 , 0 , 0 , 3600 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); for (int i = 0; i < 10; i++) { std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: nullptr, &result)); EXPECT_EQ(std::vector<string>({"gs: result); TF_EXPECT_OK(fs.GetMatchingPaths("gs: 0 , 
0 , 0 , 0 , 3600 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); for (int i = 0; i < 10; i++) { std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: nullptr, &result)); EXPECT_EQ(std::vector<string>({"gs: result); } fs.FlushCaches(nullptr); for (int i = 0; i < 10; i++) { std::vector<string> result; TF_EXPECT_OK(fs.GetMatchingPaths("gs: nullptr, &result)); EXPECT_EQ(std::vector<string>({"gs: result); } } TEST(GcsFileSystemTest, DeleteFile) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "01234567"), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile1.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "path%2Ffile1.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:19:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "76543210")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 , 16 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); char scratch[100]; absl::string_view result; std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: TF_EXPECT_OK(file->Read(0, 8, &result, scratch)); EXPECT_EQ("01234567", result); TF_EXPECT_OK(fs.DeleteFile("gs: TF_EXPECT_OK(file->Read(0, 8, &result, scratch)); 
EXPECT_EQ("76543210", result); } TEST(GcsFileSystemTest, DeleteFile_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_TRUE( errors::IsInvalidArgument(fs.DeleteFile("gs: } TEST(GcsFileSystemTest, DeleteFile_StatCacheRemoved) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest("Uri: https: "/bucket/o/file.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 , 16 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat_before_deletion; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1010, stat_before_deletion.length); TF_EXPECT_OK(fs.DeleteFile("gs: FileStatistics stat_after_deletion; EXPECT_EQ( error::Code::NOT_FOUND, fs.Stat("gs: } TEST(GcsFileSystemTest, DeleteDir_Empty) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.DeleteDir("gs: } TEST(GcsFileSystemTest, DeleteDir_OnlyDirMarkerLeft) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/\" }]}"), new FakeHttpRequest("Uri: https: "/bucket/o/path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.DeleteDir("gs: } TEST(GcsFileSystemTest, DeleteDir_BucketOnly) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "name%2CnextPageToken&maxResults=2\nAuth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.DeleteDir("gs: } TEST(GcsFileSystemTest, DeleteDir_NonEmpty) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F&maxResults=2\n" "Auth Token: 
fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_EQ(error::Code::FAILED_PRECONDITION, fs.DeleteDir("gs: } TEST(GcsFileSystemTest, GetFileSize) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); uint64 size; TF_EXPECT_OK(fs.GetFileSize("gs: EXPECT_EQ(1010, size); } TEST(GcsFileSystemTest, GetFileSize_NoObjectName) { std::vector<HttpRequest*> requests; GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); uint64 size; EXPECT_TRUE(errors::IsInvalidArgument( fs.GetFileSize("gs: } TEST(GcsFileSystemTest, RenameFile_Folder) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path1%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path1/subfolder/file1.txt\" }]}"), new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken&prefix=path1%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path1/\" }," " { \"name\": \"path1/subfolder/file1.txt\" }," " { \"name\": \"path1/file2.txt\" }]}"), new FakeHttpRequest( "Uri: https: "path1%2F/rewriteTo/b/bucket/o/path2%2F\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path1%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "path1%2Fsubfolder%2Ffile1.txt/rewriteTo/b/bucket/o/" "path2%2Fsubfolder%2Ffile1.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path1%2Fsubfolder%2Ffile1.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "path1%2Ffile2.txt/rewriteTo/b/bucket/o/path2%2Ffile2.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path1%2Ffile2.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK( fs.RenameFile("gs: } TEST(GcsFileSystemTest, RenameFile_Object) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "01234567"), new FakeHttpRequest( "Uri: https: 
"path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "76543210"), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "89abcdef"), new FakeHttpRequest( "Uri: https: "path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"2\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-15\n" "Timeouts: 5 1 20\n", "fedcba98")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 16 , 64 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); char scratch[100]; absl::string_view result; std::unique_ptr<RandomAccessFile> src; std::unique_ptr<RandomAccessFile> dst; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: 
TF_EXPECT_OK(src->Read(0, 8, &result, scratch)); EXPECT_EQ("01234567", result); TF_EXPECT_OK( fs.NewRandomAccessFile("gs: TF_EXPECT_OK(dst->Read(0, 8, &result, scratch)); EXPECT_EQ("76543210", result); TF_EXPECT_OK(fs.RenameFile("gs: "gs: TF_EXPECT_OK(src->Read(0, 8, &result, scratch)); EXPECT_EQ("89abcdef", result); TF_EXPECT_OK(dst->Read(0, 8, &result, scratch)); EXPECT_EQ("fedcba98", result); } TEST(GcsFileSystemTest, RenameFile_Object_FlushTargetStatCache) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1000\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "path%2Fdst.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, 
*kAllowedLocationsDefault, nullptr , false ); FileStatistics stat_before_renaming; TF_EXPECT_OK( fs.Stat("gs: EXPECT_EQ(1000, stat_before_renaming.length); TF_EXPECT_OK(fs.RenameFile("gs: "gs: FileStatistics stat_after_renaming; TF_EXPECT_OK( fs.Stat("gs: EXPECT_EQ(1010, stat_after_renaming.length); } TEST(GcsFileSystemTest, RenameFile_Object_DeletionRetried) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": true}"), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.RenameFile("gs: "gs: } TEST(GcsFileSystemTest, RenameFile_Object_Incomplete) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsrc.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: 
"path%2Fsrc.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "path%2Fsrc.txt/rewriteTo/b/bucket/o/path%2Fdst.txt\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "{\"done\": false}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_TRUE(errors::IsUnimplemented(fs.RenameFile( "gs: } TEST(GcsFileSystemTest, Stat_Object) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1010, stat.length); EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1); EXPECT_FALSE(stat.is_directory); } TEST(GcsFileSystemTest, Stat_Folder) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "subfolder?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": 
\"subfolder/\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(0, stat.length); EXPECT_EQ(0, stat.mtime_nsec); EXPECT_TRUE(stat.is_directory); } TEST(GcsFileSystemTest, Stat_ObjectOrFolderNotFound) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "path?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; EXPECT_EQ(error::Code::NOT_FOUND, fs.Stat("gs: } TEST(GcsFileSystemTest, Stat_Bucket) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(0, stat.length); EXPECT_EQ(0, stat.mtime_nsec); EXPECT_TRUE(stat.is_directory); } TEST(GcsFileSystemTest, Stat_BucketNotFound) { std::vector<HttpRequest*> requests({new 
FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; EXPECT_EQ(error::Code::NOT_FOUND, fs.Stat("gs: } TEST(GcsFileSystemTest, Stat_Cache) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "subfolder%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"subfolder/\" }]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); for (int i = 0; i < 10; i++) { FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1010, stat.length); EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1); EXPECT_FALSE(stat.is_directory); TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(0, stat.length); EXPECT_EQ(0, stat.mtime_nsec); EXPECT_TRUE(stat.is_directory); } } TEST(GcsFileSystemTest, Stat_Cache_Flush) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: 
"file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); for (int i = 0; i < 10; i++) { FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1010, stat.length); EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1); EXPECT_FALSE(stat.is_directory); } fs.FlushCaches(nullptr); for (int i = 0; i < 10; i++) { FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1010, stat.length); EXPECT_NEAR(1461971724896, stat.mtime_nsec / 1000 / 1000, 1); EXPECT_FALSE(stat.is_directory); } } TEST(GcsFileSystemTest, Stat_FilenameEndingWithSlash) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "dir%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"5\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(5, stat.length); EXPECT_TRUE(stat.is_directory); } TEST(GcsFileSystemTest, 
IsDirectory_NotFound) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_EQ(error::Code::NOT_FOUND, fs.IsDirectory("gs: } TEST(GcsFileSystemTest, IsDirectory_NotDirectoryButObject) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=file.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_EQ(error::Code::FAILED_PRECONDITION, fs.IsDirectory("gs: } TEST(GcsFileSystemTest, IsDirectory_Yes) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [{\"name\": \"subfolder/\"}]}"), new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken&prefix=subfolder%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [{\"name\": \"subfolder/\"}]}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.IsDirectory("gs: TF_EXPECT_OK(fs.IsDirectory("gs: } TEST(GcsFileSystemTest, IsDirectory_Bucket) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.IsDirectory("gs: TF_EXPECT_OK(fs.IsDirectory("gs: } TEST(GcsFileSystemTest, IsDirectory_BucketNotFound) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); EXPECT_EQ(error::Code::NOT_FOUND, fs.IsDirectory("gs: } TEST(GcsFileSystemTest, CreateDir_Folder) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "subpath%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", 
"{}"), new FakeHttpRequest( "Uri: https: "uploadType=media&name=subpath%2F&ifGenerationMatch=0\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", ""), new FakeHttpRequest( "Uri: https: "subpath%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "subpath%2F?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "uploadType=media&name=subpath%2F&ifGenerationMatch=0\n" "Auth Token: fake_token\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", errors::FailedPrecondition("412"), 412), }); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.CreateDir("gs: EXPECT_EQ(errors::AlreadyExists("gs: fs.CreateDir("gs: EXPECT_EQ(errors::AlreadyExists("gs: fs.CreateDir("gs: } TEST(GcsFileSystemTest, CreateDir_Bucket) { std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", ""), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TF_EXPECT_OK(fs.CreateDir("gs: TF_EXPECT_OK(fs.CreateDir("gs: } TEST(GcsFileSystemTest, DeleteRecursively_Ok) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: 
"fields=items%2Fname%2CnextPageToken&prefix=path%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }]}"), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/\" }," " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/subpath/file2.txt\" }," " { \"name\": \"path/file3.txt\" }]}"), new FakeHttpRequest("Uri: https: "/bucket/o/path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile1.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "", errors::Unavailable("500"), 500), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile1.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Fsubpath%2Ffile2.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile3.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); int64_t undeleted_files, undeleted_dirs; TF_EXPECT_OK(fs.DeleteRecursively("gs: &undeleted_files, &undeleted_dirs)); EXPECT_EQ(0, undeleted_files); EXPECT_EQ(0, undeleted_dirs); } TEST(GcsFileSystemTest, DeleteRecursively_DeletionErrors) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }]}"), new 
FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{\"items\": [ " " { \"name\": \"path/file1.txt\" }," " { \"name\": \"path/subpath/\" }," " { \"name\": \"path/subpath/file2.txt\" }," " { \"name\": \"path/file3.txt\" }]}"), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile1.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Fsubpath%2F\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Fsubpath%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"items\": [ " " { \"name\": \"path/subpath/\" }]}")), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Fsubpath%2Ffile2.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest("Uri: https: "/bucket/o/path%2Ffile3.txt\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", "", errors::NotFound("404"), 404), new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2Ffile3.txt%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "path%2Ffile3.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); int64_t undeleted_files, undeleted_dirs; TF_EXPECT_OK(fs.DeleteRecursively("gs: &undeleted_files, &undeleted_dirs)); EXPECT_EQ(1, undeleted_files); EXPECT_EQ(1, undeleted_dirs); } 
TEST(GcsFileSystemTest, DeleteRecursively_NotAFolder) { std::vector<HttpRequest*> requests( { new FakeHttpRequest( "Uri: https: "fields=items%2Fname%2CnextPageToken&prefix=path%2F" "&maxResults=1\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "{}"), new FakeHttpRequest( "Uri: https: "path?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", "", errors::NotFound("404"), 404)}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); int64_t undeleted_files, undeleted_dirs; EXPECT_EQ(error::Code::NOT_FOUND, fs.DeleteRecursively("gs: &undeleted_dirs) .code()); EXPECT_EQ(0, undeleted_files); EXPECT_EQ(1, undeleted_dirs); } TEST(GcsFileSystemTest, NoConstraintsEnvironmentVariableTest) { unsetenv("GCS_ALLOWED_BUCKET_LOCATIONS"); GcsFileSystem fs1; EXPECT_EQ(*kAllowedLocationsDefault, fs1.allowed_locations()); fs1.FlushCaches(nullptr); } TEST(GcsFileSystemTest, BucketLocationConstraintEnvironmentVariableTest) { unsetenv("GCS_ALLOWED_BUCKET_LOCATIONS"); setenv("GCS_ALLOWED_BUCKET_LOCATIONS", "auto", 1); GcsFileSystem fs1; EXPECT_EQ(*kAllowedLocationsAuto, fs1.allowed_locations()); setenv("GCS_ALLOWED_BUCKET_LOCATIONS", "CUSTOM,list", 1); GcsFileSystem fs2; EXPECT_EQ(std::unordered_set<string>({"custom", "list"}), fs2.allowed_locations()); } TEST(GcsFileSystemTest, AdditionalRequestHeaderTest) { GcsFileSystem fs1; EXPECT_EQ("", fs1.additional_header_name()); EXPECT_EQ("", fs1.additional_header_value()); setenv("GCS_ADDITIONAL_REQUEST_HEADER", "X-Add-Header:My Additional Header Value", 1); GcsFileSystem fs2; EXPECT_EQ("X-Add-Header", fs2.additional_header_name()); EXPECT_EQ("My Additional Header Value", fs2.additional_header_value()); setenv("GCS_ADDITIONAL_REQUEST_HEADER", 
"Someinvalidheadervalue", 1); GcsFileSystem fs3; EXPECT_EQ("", fs3.additional_header_name()); EXPECT_EQ("", fs3.additional_header_value()); setenv("GCS_ADDITIONAL_REQUEST_HEADER", ":thisisinvalid", 1); GcsFileSystem fs4; EXPECT_EQ("", fs4.additional_header_name()); EXPECT_EQ("", fs4.additional_header_value()); setenv("GCS_ADDITIONAL_REQUEST_HEADER", "soisthis:", 1); GcsFileSystem fs5; EXPECT_EQ("", fs5.additional_header_name()); EXPECT_EQ("", fs5.additional_header_value()); setenv("GCS_ADDITIONAL_REQUEST_HEADER", "a:b", 1); GcsFileSystem fs6; EXPECT_EQ("a", fs6.additional_header_name()); EXPECT_EQ("b", fs6.additional_header_value()); auto* add_header = new std::pair<const string, const string>( "mynewheader", "newheadercontents"); std::vector<HttpRequest*> requests( { new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header mynewheader: newheadercontents\n" "Header Hello: world\n", "{}")}); GcsFileSystem fs7( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, add_header , false ); std::unique_ptr<HttpRequest> request; TF_EXPECT_OK(fs7.CreateHttpRequest(&request)); request->SetUri("https: request->AddHeader("Hello", "world"); TF_EXPECT_OK(request->Send()); } TEST(GcsFileSystemTest, OverrideCacheParameters) { setenv("GCS_READ_CACHE_BLOCK_SIZE_MB", "16", 1); setenv("GCS_READ_CACHE_MAX_SIZE_MB", "128", 1); GcsFileSystem fs1; EXPECT_EQ(16 * 1024 * 1024, fs1.block_size()); EXPECT_EQ(128 * 1024 * 1024, fs1.max_bytes()); EXPECT_EQ(0, fs1.max_staleness()); EXPECT_EQ(120, fs1.timeouts().connect); EXPECT_EQ(60, fs1.timeouts().idle); EXPECT_EQ(3600, fs1.timeouts().metadata); EXPECT_EQ(3600, fs1.timeouts().read); EXPECT_EQ(3600, fs1.timeouts().write); unsetenv("GCS_READ_CACHE_BLOCK_SIZE_MB"); setenv("GCS_READAHEAD_BUFFER_SIZE_BYTES", 
"123456789", 1); GcsFileSystem fs2; EXPECT_EQ(123456789L, fs2.block_size()); setenv("GCS_READ_CACHE_BLOCK_SIZE_MB", "1", 1); setenv("GCS_READ_CACHE_MAX_SIZE_MB", "16", 1); setenv("GCS_READ_CACHE_MAX_STALENESS", "60", 1); GcsFileSystem fs3; EXPECT_EQ(1048576L, fs3.block_size()); EXPECT_EQ(16 * 1024 * 1024, fs3.max_bytes()); EXPECT_EQ(60, fs3.max_staleness()); setenv("GCS_STAT_CACHE_MAX_AGE", "60", 1); setenv("GCS_STAT_CACHE_MAX_ENTRIES", "32", 1); setenv("GCS_MATCHING_PATHS_CACHE_MAX_AGE", "30", 1); setenv("GCS_MATCHING_PATHS_CACHE_MAX_ENTRIES", "64", 1); GcsFileSystem fs4; EXPECT_EQ(60, fs4.stat_cache_max_age()); EXPECT_EQ(32, fs4.stat_cache_max_entries()); EXPECT_EQ(30, fs4.matching_paths_cache_max_age()); EXPECT_EQ(64, fs4.matching_paths_cache_max_entries()); setenv("GCS_REQUEST_CONNECTION_TIMEOUT_SECS", "10", 1); setenv("GCS_REQUEST_IDLE_TIMEOUT_SECS", "5", 1); setenv("GCS_METADATA_REQUEST_TIMEOUT_SECS", "20", 1); setenv("GCS_READ_REQUEST_TIMEOUT_SECS", "30", 1); setenv("GCS_WRITE_REQUEST_TIMEOUT_SECS", "40", 1); GcsFileSystem fs5; EXPECT_EQ(10, fs5.timeouts().connect); EXPECT_EQ(5, fs5.timeouts().idle); EXPECT_EQ(20, fs5.timeouts().metadata); EXPECT_EQ(30, fs5.timeouts().read); EXPECT_EQ(40, fs5.timeouts().write); } TEST(GcsFileSystemTest, CreateHttpRequest) { std::vector<HttpRequest*> requests( { new FakeHttpRequest("Uri: https: "Auth Token: fake_token\n" "Header Hello: world\n", "{}")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<HttpRequest> request; TF_EXPECT_OK(fs.CreateHttpRequest(&request)); request->SetUri("https: request->AddHeader("Hello", "world"); TF_EXPECT_OK(request->Send()); } class TestGcsStats : public GcsStatsInterface { public: void Configure(GcsFileSystem* 
fs, GcsThrottle* throttle, const FileBlockCache* block_cache) override { CHECK(fs_ == nullptr); CHECK(throttle_ == nullptr); CHECK(block_cache_ == nullptr); fs_ = fs; throttle_ = throttle; block_cache_ = block_cache; } void RecordBlockLoadRequest(const string& file, size_t offset) override { block_load_request_file_ = file; } void RecordBlockRetrieved(const string& file, size_t offset, size_t bytes_transferred) override { block_retrieved_file_ = file; block_retrieved_bytes_transferred_ = bytes_transferred; } void RecordStatObjectRequest() override { stat_object_request_count_++; } HttpRequest::RequestStats* HttpStats() override { return nullptr; } GcsFileSystem* fs_ = nullptr; GcsThrottle* throttle_ = nullptr; const FileBlockCache* block_cache_ = nullptr; string block_load_request_file_; string block_retrieved_file_; size_t block_retrieved_bytes_transferred_ = 0; int stat_object_request_count_ = 0; }; TEST(GcsFileSystemTest, Stat_StatsRecording) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "file.txt?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"1010\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}"))}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TestGcsStats stats; fs.SetStats(&stats); EXPECT_EQ(stats.fs_, &fs); FileStatistics stat; TF_EXPECT_OK(fs.Stat("gs: EXPECT_EQ(1, stats.stat_object_request_count_); } TEST(GcsFileSystemTest, NewRandomAccessFile_StatsRecording) { std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-5\n" "Timeouts: 5 1 20\n", "012345")}); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), 
std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 0 , 0 , 0 , 0 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); TestGcsStats stats; fs.SetStats(&stats); EXPECT_EQ(stats.fs_, &fs); std::unique_ptr<RandomAccessFile> file; TF_EXPECT_OK( fs.NewRandomAccessFile("gs: char scratch[6]; absl::string_view result; TF_EXPECT_OK(file->Read(0, sizeof(scratch), &result, scratch)); EXPECT_EQ("012345", result); EXPECT_EQ("gs: EXPECT_EQ("gs: EXPECT_EQ(6, stats.block_retrieved_bytes_transferred_); } TEST(GcsFileSystemTest, NewAppendableFile_MultipleFlushesWithCompose) { std::vector<string> contents( {"content0,", "content1,", "content2,", "content3,"}); std::vector<HttpRequest*> requests({ new FakeHttpRequest( "Uri: " "https: "some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: " "https: "Auth Token: fake_token\n" "Range: 0-1048575\n" "Timeouts: 5 1 20\n", contents[0]), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=some%2Fpath%2Fappendable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 18\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-17/18\n" "Timeouts: 5 1 30\n" "Put body: ", contents[0], contents[1], "\n"), ""), new FakeHttpRequest( "Uri: " "https: "o?uploadType=resumable&name=some%2Fpath%2F.tmpcompose%2Fappendable." 
"18\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 9\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: "location"}}), new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-8/9\n" "Timeouts: 5 1 30\n" "Put body: ", contents[2], "\n"), ""), new FakeHttpRequest( "Uri: " "https: "some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1234\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest("Uri: " "https: "some%2Fpath%2Fappendable/compose\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header content-type: application/json\n" "Post body: {'sourceObjects': [{'name': " "'some/path/" "appendable','objectPrecondition':{'" "ifGenerationMatch':1234}},{'name': " "'some/path/.tmpcompose/appendable.18'}]}\n", ""), new FakeHttpRequest("Uri: " "https: "some%2Fpath%2F.tmpcompose%2Fappendable.18\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=some%2Fpath%2F.tmpcompose%2Fappendable." 
"27\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 9\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-8/9\n" "Timeouts: 5 1 30\n" "Put body: ", contents[3], "\n"), ""), new FakeHttpRequest( "Uri: " "https: "some%2Fpath%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"4567\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest("Uri: " "https: "some%2Fpath%2Fappendable/compose\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Header content-type: application/json\n" "Post body: {'sourceObjects': [{'name': " "'some/path/" "appendable','objectPrecondition':{'" "ifGenerationMatch':4567}},{'name': " "'some/path/.tmpcompose/appendable.27'}]}\n", ""), new FakeHttpRequest("Uri: " "https: "some%2Fpath%2F.tmpcompose%2Fappendable." "27\n" "Auth Token: fake_token\n" "Timeouts: 5 1 10\n" "Delete: yes\n", ""), }); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 , 32 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , true ); std::unique_ptr<WritableFile> wfile; TF_EXPECT_OK(fs.NewAppendableFile("gs: &wfile)); TF_EXPECT_OK(wfile->Append(contents[1])); TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(wfile->Append(contents[2])); TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(wfile->Append(contents[3])); TF_EXPECT_OK(wfile->Close()); } TEST(GcsFileSystemTest, NewAppendableFile_MultipleFlushesWithoutCompose) { std::vector<string> contents( {"content0,", "content1,", "content2,", "content3,"}); std::vector<HttpRequest*> requests({ new FakeHttpRequest( "Uri: https: "path%2Fappendable?fields=size%2Cgeneration%2Cupdated\n" "Auth Token: 
fake_token\n" "Timeouts: 5 1 10\n", strings::StrCat("{\"size\": \"8\",\"generation\": \"1\"," "\"updated\": \"2016-04-29T23:15:24.896Z\"}")), new FakeHttpRequest( "Uri: https: "Auth Token: fake_token\n" "Range: 0-1048575\n" "Timeouts: 5 1 20\n", contents[0]), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fappendable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 18\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-17/18\n" "Timeouts: 5 1 30\n" "Put body: ", contents[0], contents[1], "\n"), ""), new FakeHttpRequest("Uri: " "https: "bucket/o?" "uploadType=resumable&name=path%2Fappendable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 27\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: "location"}}), new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-26/27\n" "Timeouts: 5 1 30\n" "Put body: ", contents[0], contents[1], contents[2], "\n"), ""), new FakeHttpRequest( "Uri: https: "uploadType=resumable&name=path%2Fappendable\n" "Auth Token: fake_token\n" "Header X-Upload-Content-Length: 36\n" "Post: yes\n" "Timeouts: 5 1 10\n", "", {{"Location", "https: new FakeHttpRequest( strings::StrCat("Uri: https: "Auth Token: fake_token\n" "Header Content-Range: bytes 0-35/36\n" "Timeouts: 5 1 30\n" "Put body: ", contents[0], contents[1], contents[2], contents[3], "\n"), ""), }); GcsFileSystem fs( std::unique_ptr<AuthProvider>(new FakeAuthProvider), std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), std::unique_ptr<ZoneProvider>(new FakeZoneProvider), 32 , 32 , 0 , 3600 , 0 , 0 , 0 , kTestRetryConfig, kTestTimeoutConfig, *kAllowedLocationsDefault, nullptr , false ); std::unique_ptr<WritableFile> wfile; TF_EXPECT_OK( fs.NewAppendableFile("gs: TF_EXPECT_OK(wfile->Append(contents[1])); 
TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(wfile->Append(contents[2])); TF_EXPECT_OK(wfile->Flush()); TF_EXPECT_OK(wfile->Append(contents[3])); TF_EXPECT_OK(wfile->Close()); } TEST(GcsFileSystemTest, AppendModeCompose) { unsetenv("GCS_APPEND_MODE"); setenv("GCS_APPEND_MODE", "compose", 1); GcsFileSystem fs1; EXPECT_EQ(true, fs1.compose_append()); } TEST(GcsFileSystemTest, AppendModeDefault) { unsetenv("GCS_APPEND_MODE"); GcsFileSystem fs1; EXPECT_EQ(false, fs1.compose_append()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_file_system_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1acdc449-82ca-466a-ac4b-e2a65acee19e
cpp
tensorflow/tensorflow
gcs_throttle
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_throttle.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_throttle_test.cc
#include "tsl/platform/cloud/gcs_throttle.h"

#include <algorithm>

namespace tsl {
namespace {

// Process-wide fallback clock, built lazily and intentionally never freed,
// for callers that construct a GcsThrottle without injecting an EnvTime.
EnvTime* get_default_env_time() {
  static EnvTime* default_env_time = new EnvTime;
  return default_env_time;
}

}  // namespace

// Seeds the last-update stamp from the supplied clock (or the real clock when
// none is given) and starts with an empty token bucket.
GcsThrottle::GcsThrottle(EnvTime* env_time)
    : last_updated_secs_(env_time ? env_time->GetOverridableNowSeconds()
                                  : EnvTime::NowSeconds()),
      available_tokens_(0),
      env_time_(env_time ? env_time : get_default_env_time()) {}

// Returns true iff the request may proceed. Charges tokens_per_request when
// enough tokens are available; when the bucket is short, the request is still
// admitted (without charging) if throttling is disabled.
bool GcsThrottle::AdmitRequest() {
  mutex_lock lock(mu_);
  UpdateState();
  if (available_tokens_ < config_.tokens_per_request) {
    // Bucket too low: reject unless the throttle is switched off entirely.
    return !config_.enabled;
  }
  available_tokens_ -= config_.tokens_per_request;
  return true;
}

// Charges the bucket for the bytes of a completed response. The balance may
// go negative; AdmitRequest then blocks until replenishment catches up.
void GcsThrottle::RecordResponse(size_t num_bytes) {
  mutex_lock lock(mu_);
  UpdateState();
  available_tokens_ -= request_bytes_to_tokens(num_bytes);
}

// Installs a new configuration, resetting the bucket to the configured
// initial balance and restarting replenishment from "now".
void GcsThrottle::SetConfig(GcsThrottleConfig config) {
  mutex_lock lock(mu_);
  config_ = config;
  available_tokens_ = config.initial_tokens;
  last_updated_secs_ = env_time_->GetOverridableNowSeconds();
}

// Replenishes tokens for the wall-clock time elapsed since the last update,
// clamping the result to the bucket capacity. A clock that moves backwards
// yields a zero delta rather than draining tokens. Caller must hold mu_.
void GcsThrottle::UpdateState() {
  int64_t now_secs = env_time_->GetOverridableNowSeconds();
  uint64 elapsed_secs = std::max(
      int64_t{0}, now_secs - static_cast<int64_t>(last_updated_secs_));
  available_tokens_ += elapsed_secs * config_.token_rate;
  available_tokens_ = std::min(available_tokens_, config_.bucket_size);
  last_updated_secs_ = now_secs;
}

}  // namespace tsl
// Unit tests for tsl::GcsThrottle, driven by a manually-advanced fake clock.
// TestTime overrides GetOverridableNowNanos() with a hand-set microsecond
// counter, so tests control time via SetTime()/AdvanceSeconds().
// The assertions imply the default GcsThrottleConfig replenishes 100000
// tokens/second, charges 100 tokens per admitted request, and caps the
// bucket at 10000000 tokens (BucketLimit) -- NOTE(review): exact defaults
// live in gcs_throttle.h; confirm there.
// NOTE(review): this extraction collapsed the file onto two physical lines;
// the code below is byte-identical to the original.
#include "tsl/platform/cloud/gcs_throttle.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/str_util.h" #include "tsl/platform/test.h" namespace tsl { namespace { class TestTime : public EnvTime { public: uint64 GetOverridableNowNanos() const override { return now_micros_ * kMicrosToNanos; } void SetTime(uint64 now_micros) { now_micros_ = now_micros; } void AdvanceSeconds(int64_t secs) { now_micros_ += secs * kSecondsToMicros; } private: uint64 now_micros_ = 1234567890000000ULL; }; class GcsThrottleTest : public ::testing::Test { protected: GcsThrottleTest() : throttle_(&time_) { config_.enabled = true; throttle_.SetConfig(config_); } GcsThrottleConfig config_; TestTime time_; GcsThrottle throttle_; }; TEST_F(GcsThrottleTest, ReplenishTokens) { EXPECT_EQ(0, throttle_.available_tokens()); time_.AdvanceSeconds(1); EXPECT_EQ(100000, throttle_.available_tokens()); time_.AdvanceSeconds(2); EXPECT_EQ(300000, throttle_.available_tokens()); } TEST_F(GcsThrottleTest, RejectRequest) { EXPECT_EQ(0, throttle_.available_tokens()); time_.AdvanceSeconds(1); EXPECT_TRUE(throttle_.AdmitRequest()); EXPECT_EQ(99900, throttle_.available_tokens()); for (int i = 1; i < 1000; i++) { EXPECT_TRUE(throttle_.AdmitRequest()); } EXPECT_FALSE(throttle_.AdmitRequest()); } TEST_F(GcsThrottleTest, MarkResponses) { time_.AdvanceSeconds(1); EXPECT_TRUE(throttle_.AdmitRequest()); throttle_.RecordResponse(128000000); EXPECT_EQ(-25100, throttle_.available_tokens()); EXPECT_FALSE(throttle_.AdmitRequest()); time_.AdvanceSeconds(1); EXPECT_TRUE(throttle_.AdmitRequest()) << "Available tokens: " << throttle_.available_tokens(); } TEST_F(GcsThrottleTest, Skippingtime_) { EXPECT_EQ(0, throttle_.available_tokens()); time_.AdvanceSeconds(90); EXPECT_EQ(9000000, throttle_.available_tokens()); } TEST_F(GcsThrottleTest, BucketLimit) { time_.AdvanceSeconds(120); EXPECT_EQ(10000000, throttle_.available_tokens()); } TEST_F(GcsThrottleTest, ReverseTime) { time_.AdvanceSeconds(1); 
// ReverseTime continues: after the clock jumps back 3600s the balance must
// stay at 100000 (UpdateState clamps negative deltas to zero -- see the
// std::max(int64_t{0}, ...) in gcs_throttle.cc) and resume growing once
// time moves forward again. GcsThrottleDisabledTest then checks that a
// default-constructed (disabled) throttle still tracks tokens but never
// rejects a request.
EXPECT_EQ(100000, throttle_.available_tokens()); time_.AdvanceSeconds(-3600); EXPECT_EQ(100000, throttle_.available_tokens()); time_.AdvanceSeconds(1); EXPECT_EQ(200000, throttle_.available_tokens()); } TEST(GcsThrottleDisabledTest, Disabled) { TestTime time; GcsThrottle throttle(&time); ASSERT_FALSE(throttle.is_enabled()); EXPECT_EQ(0, throttle.available_tokens()); time.AdvanceSeconds(1); EXPECT_EQ(100000, throttle.available_tokens()); EXPECT_TRUE(throttle.AdmitRequest()); EXPECT_EQ(99900, throttle.available_tokens()); time.AdvanceSeconds(1); EXPECT_EQ(199900, throttle.available_tokens()); throttle.RecordResponse(128000000); EXPECT_LT(0, throttle.available_tokens()); EXPECT_TRUE(throttle.AdmitRequest()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_throttle.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_throttle_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
25cb9de2-96c2-423a-906a-e16a2739e1c9
cpp
tensorflow/tensorflow
ram_file_block_cache
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc
// RamFileBlockCache: a read-through, block-granular in-RAM cache used by the
// GCS filesystem plugin (C filesystem API). Blocks are keyed by
// (filename, block-start offset). An LRU list bounds total cached bytes
// (Trim/RemoveBlock); a separate LRA (least-recently-added) list plus the
// Prune() loop evict blocks older than max_staleness_. Per-block state
// (CREATED/FETCHING/FINISHED/ERROR) plus a condition variable coalesce
// concurrent fetches of the same block.
// NOTE(review): this extraction collapsed the original formatting onto four
// physical lines; all code below is byte-identical to the original.
#include "tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h" #include <cstring> #include <memory> #include <sstream> #include <utility> #include "absl/synchronization/mutex.h" #include "tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h" namespace tf_gcs_filesystem { bool RamFileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) { absl::MutexLock l(&block->mu); if (block->state != FetchState::FINISHED) { return true; } if (max_staleness_ == 0) return true; return timer_seconds_() - block->timestamp <= max_staleness_; } std::shared_ptr<RamFileBlockCache::Block> RamFileBlockCache::Lookup( const Key& key) { absl::MutexLock lock(&mu_); auto entry = block_map_.find(key); if (entry != block_map_.end()) { if (BlockNotStale(entry->second)) { return entry->second; } else { RemoveFile_Locked(key.first); } } auto new_entry = std::make_shared<Block>(); lru_list_.push_front(key); lra_list_.push_front(key); new_entry->lru_iterator = lru_list_.begin(); new_entry->lra_iterator = lra_list_.begin(); new_entry->timestamp = timer_seconds_(); block_map_.emplace(std::make_pair(key, new_entry)); return new_entry; } void RamFileBlockCache::Trim() { while (!lru_list_.empty() && cache_size_ > max_bytes_) { RemoveBlock(block_map_.find(lru_list_.back())); } } void RamFileBlockCache::UpdateLRU(const Key& key, const std::shared_ptr<Block>& block, TF_Status* status) { absl::MutexLock lock(&mu_); if (block->timestamp == 0) { return TF_SetStatus(status, TF_OK, ""); } if (block->lru_iterator != lru_list_.begin()) { lru_list_.erase(block->lru_iterator); lru_list_.push_front(key); block->lru_iterator = lru_list_.begin(); } if (block->data.size() < block_size_) { Key fmax = std::make_pair(key.first, std::numeric_limits<size_t>::max()); auto fcmp = block_map_.upper_bound(fmax); if (fcmp != block_map_.begin() && key < (--fcmp)->first) { return TF_SetStatus(status, TF_INTERNAL, "Block cache contents are inconsistent."); } } Trim(); return TF_SetStatus(status, 
// (UpdateLRU completes with TF_OK after trimming the cache back under
// max_bytes_.) MaybeFetch below fills a block obtained from Lookup():
// CREATED/ERROR states trigger a fetch -- the block mutex is manually
// released around the block_fetcher_ call and re-acquired afterwards --
// while FETCHING waits on the condvar for up to one minute. The
// MakeCleanup() guard accounts the downloaded bytes and LRA position even
// on early returns, but only if the block was not evicted meanwhile
// (timestamp != 0).
TF_OK, ""); } void RamFileBlockCache::MaybeFetch(const Key& key, const std::shared_ptr<Block>& block, TF_Status* status) { bool downloaded_block = false; auto reconcile_state = MakeCleanup([this, &downloaded_block, &key, &block] { if (downloaded_block) { absl::MutexLock l(&mu_); if (block->timestamp != 0) { cache_size_ += block->data.capacity(); lra_list_.erase(block->lra_iterator); lra_list_.push_front(key); block->lra_iterator = lra_list_.begin(); block->timestamp = timer_seconds_(); } } }); absl::MutexLock l(&block->mu); TF_SetStatus(status, TF_OK, ""); while (true) { switch (block->state) { case FetchState::ERROR: case FetchState::CREATED: block->state = FetchState::FETCHING; block->mu.Unlock(); block->data.clear(); block->data.resize(block_size_, 0); int64_t bytes_transferred; bytes_transferred = block_fetcher_(key.first, key.second, block_size_, block->data.data(), status); block->mu.Lock(); if (TF_GetCode(status) == TF_OK) { block->data.resize(bytes_transferred, 0); std::vector<char>(block->data).swap(block->data); downloaded_block = true; block->state = FetchState::FINISHED; } else { block->state = FetchState::ERROR; } block->cond_var.SignalAll(); return; case FetchState::FETCHING: block->cond_var.WaitWithTimeout(&block->mu, absl::Minutes(1)); if (block->state == FetchState::FINISHED) { return TF_SetStatus(status, TF_OK, ""); } break; case FetchState::FINISHED: return TF_SetStatus(status, TF_OK, ""); } } return TF_SetStatus( status, TF_INTERNAL, "Control flow should never reach the end of RamFileBlockCache::Fetch."); } int64_t RamFileBlockCache::Read(const std::string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) { if (n == 0) { TF_SetStatus(status, TF_OK, ""); return 0; } if (!IsCacheEnabled() || (n > max_bytes_)) { return block_fetcher_(filename, offset, n, buffer, status); } size_t start = block_size_ * (offset / block_size_); size_t finish = block_size_ * ((offset + n) / block_size_); if (finish < offset + n) { finish += 
// Round `finish` up to the next block boundary so the final partial block
// is included. Read() then walks the covered blocks, copying each block's
// overlap with [offset, offset+n) into `buffer`; a short block signals EOF
// (OUT_OF_RANGE if the requested offset lies beyond the data, otherwise a
// short read).
block_size_; } size_t total_bytes_transferred = 0; for (size_t pos = start; pos < finish; pos += block_size_) { Key key = std::make_pair(filename, pos); std::shared_ptr<Block> block = Lookup(key); if (!block) { std::cerr << "No block for key " << key.first << "@" << key.second; abort(); } MaybeFetch(key, block, status); if (TF_GetCode(status) != TF_OK) return -1; UpdateLRU(key, block, status); if (TF_GetCode(status) != TF_OK) return -1; const auto& data = block->data; if (offset >= pos + data.size()) { std::stringstream os; os << "EOF at offset " << offset << " in file " << filename << " at position " << pos << " with data size " << data.size(); TF_SetStatus(status, TF_OUT_OF_RANGE, std::move(os).str().c_str()); return total_bytes_transferred; } auto begin = data.begin(); if (offset > pos) { begin += offset - pos; } auto end = data.end(); if (pos + data.size() > offset + n) { end -= (pos + data.size()) - (offset + n); } if (begin < end) { size_t bytes_to_copy = end - begin; memcpy(&buffer[total_bytes_transferred], &*begin, bytes_to_copy); total_bytes_transferred += bytes_to_copy; } if (data.size() < block_size_) { break; } } TF_SetStatus(status, TF_OK, ""); return total_bytes_transferred; } bool RamFileBlockCache::ValidateAndUpdateFileSignature( const std::string& filename, int64_t file_signature) { absl::MutexLock lock(&mu_); auto it = file_signature_map_.find(filename); if (it != file_signature_map_.end()) { if (it->second == file_signature) { return true; } RemoveFile_Locked(filename); it->second = file_signature; return false; } file_signature_map_[filename] = file_signature; return true; } size_t RamFileBlockCache::CacheSize() const { absl::MutexLock lock(&mu_); return cache_size_; } void RamFileBlockCache::Prune() { while (!stop_pruning_thread_.WaitForNotificationWithTimeout( absl::Microseconds(1000000))) { absl::MutexLock lock(&mu_); uint64_t now = timer_seconds_(); while (!lra_list_.empty()) { auto it = block_map_.find(lra_list_.back()); if (now - 
// Prune() stops once the least-recently-added block is still fresh; stale
// blocks cause their whole file to be evicted. RemoveFile_Locked walks the
// ordered block_map_ from (filename, 0); RemoveBlock zeroes the timestamp
// so an in-flight MaybeFetch's cleanup lambda skips accounting for it.
it->second->timestamp <= max_staleness_) { break; } RemoveFile_Locked(std::string(it->first.first)); } } } void RamFileBlockCache::Flush() { absl::MutexLock lock(&mu_); block_map_.clear(); lru_list_.clear(); lra_list_.clear(); cache_size_ = 0; } void RamFileBlockCache::RemoveFile(const std::string& filename) { absl::MutexLock lock(&mu_); RemoveFile_Locked(filename); } void RamFileBlockCache::RemoveFile_Locked(const std::string& filename) { Key begin = std::make_pair(filename, 0); auto it = block_map_.lower_bound(begin); while (it != block_map_.end() && it->first.first == filename) { auto next = std::next(it); RemoveBlock(it); it = next; } } void RamFileBlockCache::RemoveBlock(BlockMap::iterator entry) { entry->second->timestamp = 0; lru_list_.erase(entry->second->lru_iterator); lra_list_.erase(entry->second->lra_iterator); cache_size_ -= entry->second->data.capacity(); block_map_.erase(entry); } }
// Unit tests for tf_gcs_filesystem::RamFileBlockCache. ReadCache() is a
// helper that routes cache->Read() into a std::vector sized to the bytes
// actually transferred and converts the TF_Status into a tensorflow::Status
// for TF_EXPECT_OK. Each test installs a lambda "fetcher" standing in for
// the real GCS download and asserts on how often / with what arguments the
// cache invokes it.
// NOTE(review): this extraction collapsed the file onto eight physical
// lines; the code below is byte-identical to the original.
#include "tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h" #include <cstring> #include "tensorflow/c/tf_status.h" #include "tensorflow/c/tf_status_internal.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/cloud/now_seconds_env.h" #include "tensorflow/core/platform/notification.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { Status ReadCache(tf_gcs_filesystem::RamFileBlockCache* cache, const string& filename, size_t offset, size_t n, std::vector<char>* out) { out->clear(); out->resize(n, 0); TF_Status status; auto bytes_transferred = cache->Read(filename, offset, n, out->data(), &status); if (bytes_transferred >= 0) { EXPECT_LE(bytes_transferred, n); out->resize(bytes_transferred, n); } return status.status; } TEST(RamFileBlockCacheTest, IsCacheEnabled) { auto fetcher = [](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { TF_SetStatus(status, TF_OK, ""); return 0; }; tf_gcs_filesystem::RamFileBlockCache cache1(0, 0, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache2(16, 0, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache3(0, 32, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache4(16, 32, 0, fetcher); EXPECT_FALSE(cache1.IsCacheEnabled()); EXPECT_FALSE(cache2.IsCacheEnabled()); EXPECT_FALSE(cache3.IsCacheEnabled()); EXPECT_TRUE(cache4.IsCacheEnabled()); } TEST(RamFileBlockCacheTest, ValidateAndUpdateFileSignature) { int calls = 0; auto fetcher = [&calls](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { calls++; memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; string filename = "file"; tf_gcs_filesystem::RamFileBlockCache cache(16, 32, 0, fetcher); std::vector<char> out; EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123)); TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, 
// (ValidateAndUpdateFileSignature continues: an unchanged signature keeps
// cached blocks -- fetcher call count stays flat -- while a changed
// signature evicts the file and forces a refetch.)
&out)); EXPECT_EQ(calls, 1); EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123)); TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out)); EXPECT_EQ(calls, 1); EXPECT_FALSE(cache.ValidateAndUpdateFileSignature(filename, 321)); TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out)); EXPECT_EQ(calls, 2); } TEST(RamFileBlockCacheTest, PassThrough) { const string want_filename = "foo/bar"; const size_t want_offset = 42; const size_t want_n = 1024; int calls = 0; auto fetcher = [&calls, want_filename, want_offset, want_n]( const string& got_filename, size_t got_offset, size_t got_n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(got_filename, want_filename); EXPECT_EQ(got_offset, want_offset); EXPECT_EQ(got_n, want_n); calls++; memset(buffer, 'x', got_n); TF_SetStatus(status, TF_OK, ""); return got_n; }; tf_gcs_filesystem::RamFileBlockCache cache1(1, 0, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache2(0, 1, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache3(0, 0, 0, fetcher); tf_gcs_filesystem::RamFileBlockCache cache4(1000, 1000, 0, fetcher); std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache1, want_filename, want_offset, want_n, &out)); EXPECT_EQ(calls, 1); TF_EXPECT_OK(ReadCache(&cache2, want_filename, want_offset, want_n, &out)); EXPECT_EQ(calls, 2); TF_EXPECT_OK(ReadCache(&cache3, want_filename, want_offset, want_n, &out)); EXPECT_EQ(calls, 3); TF_EXPECT_OK(ReadCache(&cache4, want_filename, want_offset, want_n, &out)); EXPECT_EQ(calls, 4); } TEST(RamFileBlockCacheTest, BlockAlignment) { const size_t size = 256; std::vector<char> buf; buf.reserve(size); for (int i = 0; i < size; i++) { buf.push_back(i); } auto fetcher = [&buf](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { int64_t bytes_transferred; if (offset < buf.size()) { size_t bytes_to_copy = std::min<size_t>(buf.size() - offset, n); memcpy(buffer, buf.data() + offset, bytes_to_copy); bytes_transferred = bytes_to_copy; } else 
// (BlockAlignment continues: the fetcher returns 0 bytes past EOF; the
// loops below sweep offsets 0..9 and request sizes around each block size
// 2..4, checking byte-exact results at every block boundary.)
{ bytes_transferred = 0; } TF_SetStatus(status, TF_OK, ""); return bytes_transferred; }; for (size_t block_size = 2; block_size <= 4; block_size++) { tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0, fetcher); for (size_t offset = 0; offset < 10; offset++) { for (size_t n = block_size - 2; n <= block_size + 2; n++) { std::vector<char> got; TF_EXPECT_OK(ReadCache(&cache, "", offset, n, &got)); if (offset + n <= size) { EXPECT_EQ(got.size(), n) << "block size = " << block_size << ", offset = " << offset << ", n = " << n; } else { EXPECT_EQ(got.size(), size - offset) << "block size = " << block_size << ", offset = " << offset << ", n = " << n; } std::vector<char>::const_iterator begin = buf.begin() + offset; std::vector<char>::const_iterator end = offset + n > buf.size() ? buf.end() : begin + n; std::vector<char> want(begin, end); EXPECT_EQ(got, want) << "block size = " << block_size << ", offset = " << offset << ", n = " << n; } } } } TEST(RamFileBlockCacheTest, CacheHits) { const size_t block_size = 16; std::set<size_t> calls; auto fetcher = [&calls, block_size](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(n, block_size); EXPECT_EQ(offset % block_size, 0); EXPECT_EQ(calls.find(offset), calls.end()) << "at offset " << offset; calls.insert(offset); memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; const uint32 block_count = 256; tf_gcs_filesystem::RamFileBlockCache cache( block_size, block_count * block_size, 0, fetcher); std::vector<char> out; out.resize(block_count, 0); for (int i = 0; i < 2; i++) { for (int j = 0; j < block_count; j++) { TF_EXPECT_OK(ReadCache(&cache, "", block_size * j, block_size, &out)); } } } TEST(RamFileBlockCacheTest, OutOfRange) { const size_t block_size = 16; const size_t file_size = 24; bool first_block = false; bool second_block = false; auto fetcher = [block_size, file_size, &first_block, &second_block]( const string& filename, size_t 
// (OutOfRange continues: a read starting past file_size must yield
// OUT_OF_RANGE, and re-reading the short second block must not hit the
// fetcher again -- second_block stays false.)
offset, size_t n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(n, block_size); EXPECT_EQ(offset % block_size, 0); size_t bytes_to_copy = 0; if (offset == 0) { memset(buffer, 'x', n); bytes_to_copy = n; first_block = true; } else if (offset == block_size) { bytes_to_copy = file_size - block_size; memset(buffer, 'x', bytes_to_copy); second_block = true; } TF_SetStatus(status, TF_OK, ""); return bytes_to_copy; }; tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0, fetcher); std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size, &out)); EXPECT_TRUE(first_block); EXPECT_EQ(out.size(), block_size); Status status = ReadCache(&cache, "", file_size + 4, 4, &out); EXPECT_EQ(status.code(), error::OUT_OF_RANGE); EXPECT_TRUE(second_block); second_block = false; TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out)); EXPECT_FALSE(second_block); EXPECT_EQ(out.size(), file_size - block_size); } TEST(RamFileBlockCacheTest, Inconsistent) { const size_t block_size = 16; auto fetcher = [block_size](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(n, block_size); EXPECT_EQ(offset % block_size, 0); EXPECT_GE(n, 1); memset(buffer, 'x', 1); TF_SetStatus(status, TF_OK, ""); return 1; }; tf_gcs_filesystem::RamFileBlockCache cache(block_size, 2 * block_size, 0, fetcher); std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out)); EXPECT_EQ(out.size(), 1); Status status = ReadCache(&cache, "", 0, block_size, &out); EXPECT_EQ(status.code(), error::INTERNAL); } TEST(RamFileBlockCacheTest, LRU) { const size_t block_size = 16; std::list<size_t> calls; auto fetcher = [&calls, block_size](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(n, block_size); EXPECT_FALSE(calls.empty()) << "at offset = " << offset; if (!calls.empty()) { EXPECT_EQ(offset, calls.front()); calls.pop_front(); } memset(buffer, 
// (LRU continues: `calls` pre-lists the offsets expected to reach the
// fetcher, in order, so any eviction in the wrong LRU order fails the
// front-of-list assertion above.)
'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; const uint32 block_count = 2; tf_gcs_filesystem::RamFileBlockCache cache( block_size, block_count * block_size, 0, fetcher); std::vector<char> out; calls.push_back(0); TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out)); calls.push_back(block_size); TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out)); calls.push_back(2 * block_size); TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out)); calls.push_back(0); TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out)); calls.push_back(block_size); TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out)); calls.push_back(0); TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out)); } TEST(RamFileBlockCacheTest, MaxStaleness) { int calls = 0; auto fetcher = [&calls](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { calls++; memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; std::vector<char> out; std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv); tf_gcs_filesystem::RamFileBlockCache cache1( 8, 16, 2 , fetcher, [&env]() { return env->NowSeconds(); }); TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out)); EXPECT_EQ(calls, 1); for (int i = 1; i <= 10; i++) { env->SetNowSeconds(i + 1); TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out)); EXPECT_EQ(calls, 1 + i / 3); } calls = 0; env->SetNowSeconds(0); tf_gcs_filesystem::RamFileBlockCache cache2( 8, 16, 0 , fetcher, [&env]() { return env->NowSeconds(); }); TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out)); EXPECT_EQ(calls, 1); env->SetNowSeconds(365 * 24 * 60 * 60); TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out)); EXPECT_EQ(calls, 1); } TEST(RamFileBlockCacheTest, RemoveFile) { int calls = 0; auto fetcher = [&calls](const string& filename, size_t 
// (RemoveFile continues: the fetcher encodes provenance in the data --
// 'a'/'b' by filename, upper-cased for offsets > 0 -- so the assertions can
// tell exactly which file/block was refetched after RemoveFile("a").)
offset, size_t n, char* buffer, TF_Status* status) -> int64_t { calls++; char c = (filename == "a") ? 'a' : (filename == "b") ? 'b' : 'x'; if (offset > 0) { c = toupper(c); } memset(buffer, c, n); TF_SetStatus(status, TF_OK, ""); return n; }; const size_t n = 3; tf_gcs_filesystem::RamFileBlockCache cache(8, 32, 0, fetcher); std::vector<char> out; std::vector<char> a(n, 'a'); std::vector<char> b(n, 'b'); std::vector<char> A(n, 'A'); std::vector<char> B(n, 'B'); TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out)); EXPECT_EQ(out, a); EXPECT_EQ(calls, 1); TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out)); EXPECT_EQ(out, A); EXPECT_EQ(calls, 2); TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out)); EXPECT_EQ(out, b); EXPECT_EQ(calls, 3); TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out)); EXPECT_EQ(out, B); EXPECT_EQ(calls, 4); TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out)); EXPECT_EQ(out, a); TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out)); EXPECT_EQ(out, A); TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out)); EXPECT_EQ(out, b); TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out)); EXPECT_EQ(out, B); EXPECT_EQ(calls, 4); cache.RemoveFile("a"); TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out)); EXPECT_EQ(out, b); TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out)); EXPECT_EQ(out, B); EXPECT_EQ(calls, 4); TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out)); EXPECT_EQ(out, a); EXPECT_EQ(calls, 5); TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out)); EXPECT_EQ(out, A); EXPECT_EQ(calls, 6); } TEST(RamFileBlockCacheTest, Prune) { int calls = 0; auto fetcher = [&calls](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { calls++; memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; std::vector<char> out; std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv); uint64 now = Env::Default()->NowSeconds(); env->SetNowSeconds(now); tf_gcs_filesystem::RamFileBlockCache cache( 8, 32, 1 , fetcher, [&env]() { return env->NowSeconds(); }); 
// (Prune continues: with max_staleness 1s the background pruning thread
// evicts stale blocks; the polling do/while loops wait -- bounded by a
// real-time deadline -- for CacheSize() to drop as the fake clock moves.)
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out)); env->SetNowSeconds(now + 1); TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out)); EXPECT_EQ(cache.CacheSize(), 24); EXPECT_EQ(calls, 3); TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out)); TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out)); EXPECT_EQ(calls, 3); env->SetNowSeconds(now + 2); uint64 start = Env::Default()->NowSeconds(); do { Env::Default()->SleepForMicroseconds(100000); } while (cache.CacheSize() == 24 && Env::Default()->NowSeconds() - start < 3); EXPECT_EQ(cache.CacheSize(), 8); TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out)); EXPECT_EQ(calls, 3); env->SetNowSeconds(now + 3); start = Env::Default()->NowSeconds(); do { Env::Default()->SleepForMicroseconds(100000); } while (cache.CacheSize() == 8 && Env::Default()->NowSeconds() - start < 3); EXPECT_EQ(cache.CacheSize(), 0); } TEST(RamFileBlockCacheTest, ParallelReads) { const int callers = 4; BlockingCounter counter(callers); auto fetcher = [&counter](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { counter.DecrementCount(); if (!counter.WaitFor(std::chrono::seconds(10))) { TF_SetStatus(status, TF_FAILED_PRECONDITION, "desired concurrency not reached"); return -1; } memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; const int block_size = 8; tf_gcs_filesystem::RamFileBlockCache cache( block_size, 2 * callers * block_size, 0, fetcher); std::vector<std::unique_ptr<Thread>> threads; threads.reserve(callers); for (int i = 0; i < callers; i++) { threads.emplace_back( Env::Default()->StartThread({}, "caller", [block_size, &cache, i]() { std::vector<char> out; TF_EXPECT_OK( ReadCache(&cache, "a", i * block_size, block_size, &out)); std::vector<char> x(block_size, 'x'); EXPECT_EQ(out, x); })); } } TEST(RamFileBlockCacheTest, CoalesceConcurrentReads) { const size_t block_size = 16; int num_requests = 0; 
// (CoalesceConcurrentReads continues: two threads reading halves of the
// same block must trigger exactly one fetch -- EXPECT_EQ(1, num_requests);
// the Notification orders the second read to start mid-fetch.)
Notification notification; auto fetcher = [&num_requests, &notification, block_size]( const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { EXPECT_EQ(n, block_size); EXPECT_EQ(offset, 0); num_requests++; memset(buffer, 'x', n); notification.Notify(); Env::Default()->SleepForMicroseconds(100000); TF_SetStatus(status, TF_OK, ""); return n; }; tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0, fetcher); std::unique_ptr<Thread> concurrent( Env::Default()->StartThread({}, "concurrent", [block_size, &cache] { std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size / 2, &out)); EXPECT_EQ(out.size(), block_size / 2); })); notification.WaitForNotification(); std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache, "", block_size / 2, block_size / 2, &out)); EXPECT_EQ(out.size(), block_size / 2); EXPECT_EQ(1, num_requests); } TEST(RamFileBlockCacheTest, Flush) { int calls = 0; auto fetcher = [&calls](const string& filename, size_t offset, size_t n, char* buffer, TF_Status* status) -> int64_t { calls++; memset(buffer, 'x', n); TF_SetStatus(status, TF_OK, ""); return n; }; tf_gcs_filesystem::RamFileBlockCache cache(16, 32, 0, fetcher); std::vector<char> out; TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out)); TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out)); EXPECT_EQ(calls, 1); cache.Flush(); TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out)); EXPECT_EQ(calls, 2); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
55bf1df7-0188-4aae-92bb-2d657ff431ce
cpp
tensorflow/tensorflow
google_auth_provider
third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider_test.cc
#include "tsl/platform/cloud/google_auth_provider.h" #ifndef _WIN32 #include <pwd.h> #include <unistd.h> #else #include <sys/types.h> #endif #include <fstream> #include <utility> #include "absl/strings/match.h" #include "json/json.h" #include "tsl/platform/base64.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/retrying_utils.h" namespace tsl { namespace { constexpr char kGoogleApplicationCredentials[] = "GOOGLE_APPLICATION_CREDENTIALS"; constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING"; constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG"; constexpr char kNoGceCheck[] = "NO_GCE_CHECK"; constexpr char kGCloudConfigFolder[] = ".config/gcloud/"; constexpr char kWellKnownCredentialsFile[] = "application_default_credentials.json"; constexpr int kExpirationTimeMarginSec = 60; constexpr char kOAuthV3Url[] = "https: constexpr char kOAuthV4Url[] = "https: constexpr char kGceTokenPath[] = "instance/service-accounts/default/token"; constexpr char kOAuthScope[] = "https: bool IsFile(const string& filename) { std::ifstream fstream(filename.c_str()); return fstream.good(); } absl::Status GetEnvironmentVariableFileName(string* filename) { if (!filename) { return errors::FailedPrecondition("'filename' cannot be nullptr."); } const char* result = std::getenv(kGoogleApplicationCredentials); if (!result || !IsFile(result)) { return errors::NotFound(strings::StrCat("$", kGoogleApplicationCredentials, " is not set or corrupt.")); } *filename = result; return absl::OkStatus(); } absl::Status GetWellKnownFileName(string* filename) { if (!filename) { return errors::FailedPrecondition("'filename' cannot be nullptr."); } string config_dir; const char* config_dir_override = std::getenv(kCloudSdkConfig); if (config_dir_override) { config_dir = config_dir_override; } else { const char* home_dir = std::getenv("HOME"); if (!home_dir) { return errors::FailedPrecondition("Could not read $HOME."); } 
config_dir = io::JoinPath(home_dir, kGCloudConfigFolder); } auto result = io::JoinPath(config_dir, kWellKnownCredentialsFile); if (!IsFile(result)) { return errors::NotFound( "Could not find the credentials file in the standard gcloud location."); } *filename = result; return absl::OkStatus(); } } GoogleAuthProvider::GoogleAuthProvider( std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client) : GoogleAuthProvider(std::unique_ptr<OAuthClient>(new OAuthClient()), std::move(compute_engine_metadata_client), Env::Default()) {} GoogleAuthProvider::GoogleAuthProvider( std::unique_ptr<OAuthClient> oauth_client, std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client, Env* env) : oauth_client_(std::move(oauth_client)), compute_engine_metadata_client_( std::move(compute_engine_metadata_client)), env_(env) {} absl::Status GoogleAuthProvider::GetToken(string* t) { mutex_lock lock(mu_); const uint64 now_sec = env_->NowSeconds(); if (now_sec + kExpirationTimeMarginSec < expiration_timestamp_sec_) { *t = current_token_; return absl::OkStatus(); } if (GetTokenForTesting().ok()) { *t = current_token_; return absl::OkStatus(); } auto token_from_files_status = GetTokenFromFiles(); if (token_from_files_status.ok()) { *t = current_token_; return absl::OkStatus(); } char* no_gce_check_var = std::getenv(kNoGceCheck); bool skip_gce_check = no_gce_check_var != nullptr && absl::EqualsIgnoreCase(no_gce_check_var, "true"); absl::Status token_from_gce_status; if (skip_gce_check) { token_from_gce_status = absl::Status(absl::StatusCode::kCancelled, strings::StrCat("GCE check skipped due to presence of $", kNoGceCheck, " environment variable.")); } else { token_from_gce_status = GetTokenFromGce(); } if (token_from_gce_status.ok()) { *t = current_token_; return absl::OkStatus(); } if (skip_gce_check) { LOG(INFO) << "Attempting an empty bearer token since no token was retrieved " << "from files, and GCE metadata check was skipped."; } else { LOG(WARNING) << 
"All attempts to get a Google authentication bearer token failed, " << "returning an empty token. Retrieving token from files failed with " "\"" << token_from_files_status.ToString() << "\"." << " Retrieving token from GCE failed with \"" << token_from_gce_status.ToString() << "\"."; } *t = ""; if (skip_gce_check) { expiration_timestamp_sec_ = 0; } else { expiration_timestamp_sec_ = UINT64_MAX; } current_token_ = ""; return absl::OkStatus(); } absl::Status GoogleAuthProvider::GetTokenFromFiles() { string credentials_filename; if (!GetEnvironmentVariableFileName(&credentials_filename).ok() && !GetWellKnownFileName(&credentials_filename).ok()) { return errors::NotFound("Could not locate the credentials file."); } Json::Value json; Json::Reader reader; std::ifstream credentials_fstream(credentials_filename); if (!reader.parse(credentials_fstream, json)) { return errors::FailedPrecondition( "Couldn't parse the JSON credentials file."); } if (json.isMember("refresh_token")) { TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromRefreshTokenJson( json, kOAuthV3Url, &current_token_, &expiration_timestamp_sec_)); } else if (json.isMember("private_key")) { TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromServiceAccountJson( json, kOAuthV4Url, kOAuthScope, &current_token_, &expiration_timestamp_sec_)); } else { return errors::FailedPrecondition( "Unexpected content of the JSON credentials file."); } return absl::OkStatus(); } absl::Status GoogleAuthProvider::GetTokenFromGce() { std::vector<char> response_buffer; const uint64 request_timestamp_sec = env_->NowSeconds(); TF_RETURN_IF_ERROR(compute_engine_metadata_client_->GetMetadata( kGceTokenPath, &response_buffer)); absl::string_view response = absl::string_view(&response_buffer[0], response_buffer.size()); TF_RETURN_IF_ERROR(oauth_client_->ParseOAuthResponse( response, request_timestamp_sec, &current_token_, &expiration_timestamp_sec_)); return absl::OkStatus(); } absl::Status GoogleAuthProvider::GetTokenForTesting() { const char* 
token = std::getenv(kGoogleAuthTokenForTesting); if (!token) { return errors::NotFound("The env variable for testing was not set."); } expiration_timestamp_sec_ = UINT64_MAX; current_token_ = token; return absl::OkStatus(); } }
#include "tsl/platform/cloud/google_auth_provider.h" #include <stdlib.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/cloud/http_request_fake.h" #include "tsl/platform/path.h" #include "tsl/platform/test.h" namespace tsl { namespace { string TestData() { return io::JoinPath(testing::TslSrcRoot(), "platform", "cloud", "testdata"); } class FakeEnv : public EnvWrapper { public: FakeEnv() : EnvWrapper(Env::Default()) {} uint64 NowSeconds() const override { return now; } uint64 now = 10000; }; class FakeOAuthClient : public OAuthClient { public: absl::Status GetTokenFromServiceAccountJson( Json::Value json, absl::string_view oauth_server_uri, absl::string_view scope, string* token, uint64* expiration_timestamp_sec) override { provided_credentials_json = json; *token = return_token; *expiration_timestamp_sec = return_expiration_timestamp; return absl::OkStatus(); } absl::Status GetTokenFromRefreshTokenJson( Json::Value json, absl::string_view oauth_server_uri, string* token, uint64* expiration_timestamp_sec) override { provided_credentials_json = json; *token = return_token; *expiration_timestamp_sec = return_expiration_timestamp; return absl::OkStatus(); } string return_token; uint64 return_expiration_timestamp; Json::Value provided_credentials_json; }; } class GoogleAuthProviderTest : public ::testing::Test { protected: void SetUp() override { ClearEnvVars(); } void TearDown() override { ClearEnvVars(); } void ClearEnvVars() { unsetenv("CLOUDSDK_CONFIG"); unsetenv("GOOGLE_APPLICATION_CREDENTIALS"); unsetenv("GOOGLE_AUTH_TOKEN_FOR_TESTING"); unsetenv("NO_GCE_CHECK"); } }; TEST_F(GoogleAuthProviderTest, EnvironmentVariable_Caching) { setenv("GOOGLE_APPLICATION_CREDENTIALS", io::JoinPath(TestData(), "service_account_credentials.json").c_str(), 1); setenv("CLOUDSDK_CONFIG", TestData().c_str(), 1); auto oauth_client = new FakeOAuthClient; std::vector<HttpRequest*> requests; FakeEnv env; std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory = 
std::make_shared<FakeHttpRequestFactory>(&requests); auto metadataClient = std::make_shared<ComputeEngineMetadataClient>( fakeHttpRequestFactory, RetryConfig(0 )); GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), metadataClient, &env); oauth_client->return_token = "fake-token"; oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600; string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("fake-token", token); EXPECT_EQ("fake_key_id", oauth_client->provided_credentials_json.get("private_key_id", "") .asString()); oauth_client->return_token = "new-fake-token"; env.now += 3000; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("fake-token", token); env.now += 598; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("new-fake-token", token); } TEST_F(GoogleAuthProviderTest, GCloudRefreshToken) { setenv("CLOUDSDK_CONFIG", TestData().c_str(), 1); auto oauth_client = new FakeOAuthClient; std::vector<HttpRequest*> requests; FakeEnv env; std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests); auto metadataClient = std::make_shared<ComputeEngineMetadataClient>( fakeHttpRequestFactory, RetryConfig(0 )); GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), metadataClient, &env); oauth_client->return_token = "fake-token"; oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600; string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("fake-token", token); EXPECT_EQ("fake-refresh-token", oauth_client->provided_credentials_json.get("refresh_token", "") .asString()); } TEST_F(GoogleAuthProviderTest, RunningOnGCE) { auto oauth_client = new FakeOAuthClient; std::vector<HttpRequest*> requests( {new FakeHttpRequest( "Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", R"( { "access_token":"fake-gce-token", "expires_in": 3920, "token_type":"Bearer" })"), new FakeHttpRequest( "Uri: http: 
"/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", "", errors::Unavailable("503"), 503), new FakeHttpRequest( "Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", R"( { "access_token":"new-fake-gce-token", "expires_in": 3920, "token_type":"Bearer" })")}); FakeEnv env; std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests); auto metadataClient = std::make_shared<ComputeEngineMetadataClient>( fakeHttpRequestFactory, RetryConfig(0 )); GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), metadataClient, &env); string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("fake-gce-token", token); env.now += 3700; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("fake-gce-token", token); env.now += 598; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("new-fake-gce-token", token); } TEST_F(GoogleAuthProviderTest, OverrideForTesting) { setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "tokenForTesting", 1); auto oauth_client = new FakeOAuthClient; std::vector<HttpRequest*> empty_requests; FakeEnv env; std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&empty_requests); auto metadataClient = std::make_shared<ComputeEngineMetadataClient>( fakeHttpRequestFactory, RetryConfig(0 )); GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), metadataClient, &env); string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("tokenForTesting", token); } TEST_F(GoogleAuthProviderTest, NothingAvailable) { auto oauth_client = new FakeOAuthClient; std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: http: "/service-accounts/default/token\n" "Header Metadata-Flavor: Google\n", "", errors::NotFound("404"), 404)}); FakeEnv env; std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests); auto metadataClient = 
std::make_shared<ComputeEngineMetadataClient>( fakeHttpRequestFactory, RetryConfig(0 )); GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), metadataClient, &env); string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("", token); } TEST_F(GoogleAuthProviderTest, NoGceCheckEnvironmentVariable) { setenv("NO_GCE_CHECK", "True", 1); auto oauth_client = new FakeOAuthClient; FakeEnv env; GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client), nullptr, &env); string token; TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("", token); setenv("NO_GCE_CHECK", "true", 1); TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("", token); setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "newToken", 1); TF_EXPECT_OK(provider.GetToken(&token)); EXPECT_EQ("newToken", token); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
fc6b01ea-f0a3-4da6-bd55-c32d499105a4
cpp
tensorflow/tensorflow
curl_http_request
third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request_test.cc
#include "tsl/platform/cloud/curl_http_request.h" #include <algorithm> #include "xla/tsl/lib/gtl/map_util.h" #include "xla/tsl/util/env_var.h" #include "tsl/platform/errors.h" #include "tsl/platform/macros.h" #include "tsl/platform/scanner.h" #include "tsl/platform/str_util.h" #include "tsl/platform/types.h" #define CHECK_CURL_OK(expr) CHECK_EQ(expr, CURLE_OK) namespace tsl { namespace { constexpr uint64 kVerboseOutput = 0; class LibCurlProxy : public LibCurl { public: static LibCurlProxy* Load() { static LibCurlProxy* libcurl = []() -> LibCurlProxy* { curl_global_init(CURL_GLOBAL_ALL); return new LibCurlProxy; }(); return libcurl; } CURL* curl_easy_init() override { return ::curl_easy_init(); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, uint64 param) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, const char* param) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, void* param) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, size_t (*param)(void*, size_t, size_t, FILE*)) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, size_t (*param)(const void*, size_t, size_t, void*)) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, int (*param)(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)) override { return ::curl_easy_setopt(curl, option, param); } CURLcode curl_easy_perform(CURL* curl) override { return ::curl_easy_perform(curl); } CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info, uint64* value) override { return ::curl_easy_getinfo(curl, info, value); } CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info, double* value) override { return ::curl_easy_getinfo(curl, info, 
value); } void curl_easy_cleanup(CURL* curl) override { return ::curl_easy_cleanup(curl); } char* curl_easy_escape(CURL* curl, const char* str, int length) override { return ::curl_easy_escape(curl, str, length); } curl_slist* curl_slist_append(curl_slist* list, const char* str) override { return ::curl_slist_append(list, str); } void curl_slist_free_all(curl_slist* list) override { return ::curl_slist_free_all(list); } void curl_free(void* p) override { ::curl_free(p); } }; } CurlHttpRequest::CurlHttpRequest() : CurlHttpRequest(LibCurlProxy::Load()) {} CurlHttpRequest::CurlHttpRequest(LibCurl* libcurl, Env* env) : libcurl_(libcurl), env_(env) { default_response_buffer_.reserve(CURL_MAX_WRITE_SIZE); curl_ = libcurl_->curl_easy_init(); CHECK(curl_ != nullptr) << "Couldn't initialize a curl session."; std::string value = ""; TF_CHECK_OK(ReadStringFromEnvVar("CURL_CA_BUNDLE", "", &value)); if (!value.empty()) { CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_CAINFO, value.c_str())); } CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_VERBOSE, kVerboseOutput)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_USERAGENT, "TSL")); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_NOSIGNAL, 1L)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1)); CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, uint64{0})); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFODATA, this)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFOFUNCTION, &CurlHttpRequest::ProgressCallback)); SetResultBuffer(&default_response_buffer_); } CurlHttpRequest::~CurlHttpRequest() { if (curl_headers_) { libcurl_->curl_slist_free_all(curl_headers_); } if (resolve_list_) { libcurl_->curl_slist_free_all(resolve_list_); } if (put_body_) { if (fclose(put_body_) != 0) { LOG(ERROR) << "fclose() failed: " << strerror(errno); } } if (curl_) { libcurl_->curl_easy_cleanup(curl_); } } string 
CurlHttpRequest::EscapeString(const string& str) { char* out_char_str = libcurl_->curl_easy_escape(curl_, str.c_str(), 0); string out_str(out_char_str); libcurl_->curl_free(out_char_str); return out_str; } void CurlHttpRequest::SetUri(const string& uri) { CheckNotSent(); is_uri_set_ = true; uri_ = uri; CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_URL, uri.c_str())); } void CurlHttpRequest::SetRange(uint64 start, uint64 end) { CheckNotSent(); CHECK_CURL_OK(libcurl_->curl_easy_setopt( curl_, CURLOPT_RANGE, strings::StrCat(start, "-", end).c_str())); } void CurlHttpRequest::AddHeader(const string& name, const string& value) { CheckNotSent(); curl_headers_ = libcurl_->curl_slist_append( curl_headers_, strings::StrCat(name, ": ", value).c_str()); } void CurlHttpRequest::AddResolveOverride(const string& hostname, int64_t port, const string& ip_addr) { CheckNotSent(); resolve_list_ = libcurl_->curl_slist_append( resolve_list_, strings::StrCat(hostname, ":", port, ":", ip_addr).c_str()); } void CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) { CheckNotSent(); if (!auth_token.empty()) { AddHeader("Authorization", strings::StrCat("Bearer ", auth_token)); } } void CurlHttpRequest::SetRequestStats(RequestStats* stats) { CheckNotSent(); CHECK(stats_ == nullptr) << "SetRequestStats already called"; stats_ = stats; } void CurlHttpRequest::SetDeleteRequest() { CheckNotSent(); CheckMethodNotSet(); is_method_set_ = true; method_ = RequestMethod::kDelete; CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_CUSTOMREQUEST, "DELETE")); } absl::Status CurlHttpRequest::SetPutFromFile(const string& body_filepath, size_t offset) { CheckNotSent(); CheckMethodNotSet(); is_method_set_ = true; method_ = RequestMethod::kPut; if (put_body_) { if (fclose(put_body_) != 0) { LOG(ERROR) << "fclose() failed: " << strerror(errno); } } put_body_ = fopen(body_filepath.c_str(), "r"); if (!put_body_) { return errors::InvalidArgument("Couldn't open the specified file: " + 
body_filepath); } fseek(put_body_, 0, SEEK_END); const auto size = ftell(put_body_) - offset; fseek(put_body_, offset, SEEK_SET); curl_headers_ = libcurl_->curl_slist_append( curl_headers_, strings::StrCat("Content-Length: ", size).c_str()); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA, reinterpret_cast<void*>(put_body_))); return absl::OkStatus(); } void CurlHttpRequest::SetPutEmptyBody() { CheckNotSent(); CheckMethodNotSet(); is_method_set_ = true; method_ = RequestMethod::kPut; CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1)); AddHeader("Content-Length", "0"); AddHeader("Transfer-Encoding", "identity"); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION, &CurlHttpRequest::ReadCallback)); } void CurlHttpRequest::SetPostFromBuffer(const char* buffer, size_t size) { CheckNotSent(); CheckMethodNotSet(); is_method_set_ = true; method_ = RequestMethod::kPost; curl_headers_ = libcurl_->curl_slist_append( curl_headers_, strings::StrCat("Content-Length: ", size).c_str()); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION, &CurlHttpRequest::ReadCallback)); post_body_buffer_ = absl::string_view(buffer, size); } void CurlHttpRequest::SetPostEmptyBody() { CheckNotSent(); CheckMethodNotSet(); is_method_set_ = true; method_ = RequestMethod::kPost; CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1)); AddHeader("Content-Length", "0"); AddHeader("Transfer-Encoding", "identity"); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION, 
&CurlHttpRequest::ReadCallback)); } void CurlHttpRequest::SetResultBuffer(std::vector<char>* out_buffer) { CheckNotSent(); CHECK(out_buffer != nullptr); out_buffer->clear(); response_buffer_ = out_buffer; CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEFUNCTION, &CurlHttpRequest::WriteCallback)); } void CurlHttpRequest::SetResultBufferDirect(char* buffer, size_t size) { CHECK(buffer != nullptr); CheckNotSent(); direct_response_ = DirectResponseState{buffer, size, 0, 0}; CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt( curl_, CURLOPT_WRITEFUNCTION, &CurlHttpRequest::WriteCallbackDirect)); } bool CurlHttpRequest::IsDirectResponse() const { return direct_response_.buffer_ != nullptr; } size_t CurlHttpRequest::WriteCallbackDirect(const void* ptr, size_t size, size_t nmemb, void* userdata) { CHECK(ptr != nullptr); auto that = reinterpret_cast<CurlHttpRequest*>(userdata); DirectResponseState* state = &that->direct_response_; CHECK(state->buffer_ != nullptr); CHECK(state->bytes_transferred_ <= state->buffer_size_); size_t curl_bytes_received = size * nmemb; size_t user_buffer_bytes_available = state->buffer_size_ - state->bytes_transferred_; size_t bytes_to_copy = std::min<size_t>(curl_bytes_received, user_buffer_bytes_available); memcpy(&state->buffer_[state->bytes_transferred_], ptr, bytes_to_copy); state->bytes_transferred_ += bytes_to_copy; state->bytes_received_ += curl_bytes_received; return bytes_to_copy; } size_t CurlHttpRequest::GetResultBufferDirectBytesTransferred() { CHECK(direct_response_.buffer_ != nullptr); return direct_response_.bytes_transferred_; } void CurlHttpRequest::SetTimeouts(uint32 connection, uint32 inactivity, uint32 total) { CheckNotSent(); connect_timeout_secs_ = connection; inactivity_timeout_secs_ = inactivity; request_timeout_secs_ = 
total; } size_t CurlHttpRequest::WriteCallback(const void* ptr, size_t size, size_t nmemb, void* this_object) { CHECK(ptr); auto that = reinterpret_cast<CurlHttpRequest*>(this_object); CHECK(that->response_buffer_); const size_t bytes_to_copy = size * nmemb; that->response_buffer_->insert( that->response_buffer_->end(), reinterpret_cast<const char*>(ptr), reinterpret_cast<const char*>(ptr) + bytes_to_copy); return bytes_to_copy; } size_t CurlHttpRequest::ReadCallback(void* ptr, size_t size, size_t nmemb, FILE* this_object) { CHECK(ptr); auto that = reinterpret_cast<CurlHttpRequest*>(this_object); CHECK(that->post_body_read_ <= that->post_body_buffer_.size()); const size_t bytes_to_copy = std::min( size * nmemb, that->post_body_buffer_.size() - that->post_body_read_); memcpy(ptr, that->post_body_buffer_.data() + that->post_body_read_, bytes_to_copy); that->post_body_read_ += bytes_to_copy; return bytes_to_copy; } size_t CurlHttpRequest::HeaderCallback(const void* ptr, size_t size, size_t nmemb, void* this_object) { CHECK(ptr); auto that = reinterpret_cast<CurlHttpRequest*>(this_object); absl::string_view header(reinterpret_cast<const char*>(ptr), size * nmemb); absl::string_view name, value; if (strings::Scanner(header) .ScanEscapedUntil(':') .StopCapture() .OneLiteral(": ") .GetResult(&value, &name)) { string str_value(value); absl::StripTrailingAsciiWhitespace(&str_value); that->response_headers_[string(name)] = str_value; } return size * nmemb; } absl::Status CurlHttpRequest::Send() { CheckNotSent(); CHECK(is_uri_set_) << "URI has not been set."; is_sent_ = true; if (curl_headers_) { CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTPHEADER, curl_headers_)); } if (resolve_list_) { CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_RESOLVE, resolve_list_)); } CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERDATA, reinterpret_cast<void*>(this))); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERFUNCTION, 
&CurlHttpRequest::HeaderCallback)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_TIMEOUT, request_timeout_secs_)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_CONNECTTIMEOUT, connect_timeout_secs_)); char error_buffer[CURL_ERROR_SIZE] = {0}; CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_ERRORBUFFER, error_buffer)); if (stats_ != nullptr) { stats_->RecordRequest(this, uri_, method_); } const CURLcode curl_result = libcurl_->curl_easy_perform(curl_); TF_RETURN_IF_ERROR(CURLcodeToStatus(curl_result, error_buffer)); double written_size = 0; CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_SIZE_DOWNLOAD, &written_size)); CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_RESPONSE_CODE, &response_code_)); auto get_error_message = [this]() -> string { string error_message = strings::StrCat( "Error executing an HTTP request: HTTP response code ", response_code_); absl::string_view body = GetResponse(); if (!body.empty()) { return strings::StrCat( error_message, " with body '", body.substr(0, std::min(body.size(), response_to_error_limit_)), "'"); } return error_message; }; absl::Status result; switch (response_code_) { case 200: case 201: case 204: case 206: result = absl::OkStatus(); break; case 416: response_buffer_->clear(); if (IsDirectResponse()) { direct_response_.bytes_transferred_ = 0; } result = absl::OkStatus(); break; case 400: case 406: case 411: case 414: result = errors::InvalidArgument(get_error_message()); break; case 401: case 403: case 407: result = errors::PermissionDenied(get_error_message()); break; case 404: case 410: result = errors::NotFound(get_error_message()); break; case 302: case 303: case 304: case 307: case 412: case 413: result = errors::FailedPrecondition(get_error_message()); break; case 308: case 409: case 429: case 500: case 502: case 503: default: result = errors::Unavailable(get_error_message()); break; } if (!result.ok()) { response_buffer_->clear(); } if (stats_ != nullptr) { 
stats_->RecordResponse(this, uri_, method_, result); } return result; } void CurlHttpRequest::CheckMethodNotSet() const { CHECK(!is_method_set_) << "HTTP method has been already set."; } void CurlHttpRequest::CheckNotSent() const { CHECK(!is_sent_) << "The request has already been sent."; } absl::string_view CurlHttpRequest::GetResponse() const { absl::string_view response; if (IsDirectResponse()) { response = absl::string_view(direct_response_.buffer_, direct_response_.bytes_transferred_); } else { response = absl::string_view(response_buffer_->data(), response_buffer_->size()); } return response; } string CurlHttpRequest::GetResponseHeader(const string& name) const { const auto& header = response_headers_.find(name); return header != response_headers_.end() ? header->second : ""; } uint64 CurlHttpRequest::GetResponseCode() const { return response_code_; } int CurlHttpRequest::ProgressCallback(void* this_object, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { auto that = reinterpret_cast<CurlHttpRequest*>(this_object); const auto now = that->env_->NowSeconds(); const auto current_progress = dlnow + ulnow; if (that->last_progress_timestamp_ == 0 || current_progress > that->last_progress_bytes_) { that->last_progress_timestamp_ = now; that->last_progress_bytes_ = current_progress; return 0; } if (now - that->last_progress_timestamp_ > that->inactivity_timeout_secs_) { double lookup_time = -1; const auto lookup_time_status = that->libcurl_->curl_easy_getinfo( that->curl_, CURLINFO_NAMELOOKUP_TIME, &lookup_time); double connect_time = -1; const auto connect_time_status = that->libcurl_->curl_easy_getinfo( that->curl_, CURLINFO_CONNECT_TIME, &connect_time); double pretransfer_time = -1; const auto pretransfer_time_status = that->libcurl_->curl_easy_getinfo( that->curl_, CURLINFO_PRETRANSFER_TIME, &pretransfer_time); double starttransfer_time = -1; const auto starttransfer_time_status = that->libcurl_->curl_easy_getinfo( that->curl_, 
CURLINFO_STARTTRANSFER_TIME, &starttransfer_time); LOG(ERROR) << "The transmission of request " << this_object << " (URI: " << that->uri_ << ") has been stuck at " << current_progress << " of " << dltotal + ultotal << " bytes for " << now - that->last_progress_timestamp_ << " seconds and will be aborted. CURL timing information: " << "lookup time: " << lookup_time << " (" << curl_easy_strerror(lookup_time_status) << "), connect time: " << connect_time << " (" << curl_easy_strerror(connect_time_status) << "), pre-transfer time: " << pretransfer_time << " (" << curl_easy_strerror(pretransfer_time_status) << "), start-transfer time: " << starttransfer_time << " (" << curl_easy_strerror(starttransfer_time_status) << ")"; return 1; } return 0; } absl::Status CurlHttpRequest::CURLcodeToStatus(CURLcode code, const char* error_buffer) { if (code == CURLE_OK) { return absl::OkStatus(); } string error_message = strings::StrCat( "Error executing an HTTP request: libcurl code ", code, " meaning '", curl_easy_strerror(code), "', error details: "); if (code == CURLE_WRITE_ERROR && IsDirectResponse() && direct_response_.bytes_received_ > direct_response_.buffer_size_) { string overflow_message = strings::StrCat( "Received ", direct_response_.bytes_received_, " response bytes ", "for a ", direct_response_.buffer_size_, "-byte buffer"); uint64 response_code = 0; const CURLcode get_response_result = libcurl_->curl_easy_getinfo( curl_, CURLINFO_RESPONSE_CODE, &response_code); if (get_response_result == CURLE_OK && response_code == 416) { return absl::OkStatus(); } return errors::FailedPrecondition( strings::StrCat(error_message, overflow_message)); } if (code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_SSL_CACERT_BADFILE) { return errors::FailedPrecondition( strings::StrCat(error_message, error_buffer)); } return errors::Unavailable( strings::StrCat(error_message, *error_buffer ? error_buffer : "(none)")); } }
#include "tsl/platform/cloud/curl_http_request.h" #include <fstream> #include <string> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/mem.h" #include "tsl/platform/path.h" #include "tsl/platform/platform.h" #include "tsl/platform/test.h" namespace tsl { namespace { const string kTestContent = "random original scratch content"; class FakeEnv : public EnvWrapper { public: FakeEnv() : EnvWrapper(Env::Default()) {} uint64 NowSeconds() const override { return now_; } uint64 now_ = 10000; }; class FakeLibCurl : public LibCurl { public: FakeLibCurl(const string& response_content, uint64 response_code) : response_content_(response_content), response_code_(response_code) {} FakeLibCurl(const string& response_content, uint64 response_code, std::vector<std::tuple<uint64, curl_off_t>> progress_ticks, FakeEnv* env) : response_content_(response_content), response_code_(response_code), progress_ticks_(std::move(progress_ticks)), env_(env) {} FakeLibCurl(const string& response_content, uint64 response_code, const std::vector<string>& response_headers) : response_content_(response_content), response_code_(response_code), response_headers_(response_headers) {} CURL* curl_easy_init() override { is_initialized_ = true; return reinterpret_cast<CURL*>(this); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, uint64 param) override { switch (option) { case CURLOPT_POST: is_post_ = param; break; case CURLOPT_PUT: is_put_ = param; break; default: break; } return CURLE_OK; } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, const char* param) override { return curl_easy_setopt(curl, option, reinterpret_cast<void*>(const_cast<char*>(param))); } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, void* param) override { switch (option) { case CURLOPT_URL: url_ = reinterpret_cast<char*>(param); break; case CURLOPT_RANGE: range_ = reinterpret_cast<char*>(param); break; case 
CURLOPT_CUSTOMREQUEST: custom_request_ = reinterpret_cast<char*>(param); break; case CURLOPT_HTTPHEADER: headers_ = reinterpret_cast<std::vector<string>*>(param); break; case CURLOPT_ERRORBUFFER: error_buffer_ = reinterpret_cast<char*>(param); break; case CURLOPT_CAINFO: ca_info_ = reinterpret_cast<char*>(param); break; case CURLOPT_WRITEDATA: write_data_ = reinterpret_cast<FILE*>(param); break; case CURLOPT_HEADERDATA: header_data_ = reinterpret_cast<FILE*>(param); break; case CURLOPT_READDATA: read_data_ = reinterpret_cast<FILE*>(param); break; case CURLOPT_XFERINFODATA: progress_data_ = param; break; default: break; } return CURLE_OK; } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, size_t (*param)(void*, size_t, size_t, FILE*)) override { read_callback_ = param; return CURLE_OK; } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, size_t (*param)(const void*, size_t, size_t, void*)) override { switch (option) { case CURLOPT_WRITEFUNCTION: write_callback_ = param; break; case CURLOPT_HEADERFUNCTION: header_callback_ = param; break; default: break; } return CURLE_OK; } CURLcode curl_easy_setopt(CURL* curl, CURLoption option, int (*param)(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)) override { progress_callback_ = param; return CURLE_OK; } CURLcode curl_easy_perform(CURL* curl) override { if (is_post_ || is_put_) { char buffer[3]; int bytes_read; posted_content_ = ""; do { bytes_read = read_callback_(buffer, 1, sizeof(buffer), read_data_); posted_content_ = strings::StrCat( posted_content_, absl::string_view(buffer, bytes_read)); } while (bytes_read > 0); } if (write_data_ || write_callback_) { size_t bytes_handled = write_callback_( response_content_.c_str(), 1, response_content_.size(), write_data_); if (bytes_handled != response_content_.size()) { curl_easy_perform_result_ = CURLE_WRITE_ERROR; } } for (const auto& header : response_headers_) { header_callback_(header.c_str(), 1, header.size(), 
header_data_); } if (error_buffer_) { strncpy(error_buffer_, curl_easy_perform_error_message_.c_str(), curl_easy_perform_error_message_.size() + 1); } for (const auto& tick : progress_ticks_) { env_->now_ = std::get<0>(tick); if (progress_callback_(progress_data_, 0, std::get<1>(tick), 0, 0)) { return CURLE_ABORTED_BY_CALLBACK; } } return curl_easy_perform_result_; } CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info, uint64* value) override { switch (info) { case CURLINFO_RESPONSE_CODE: *value = response_code_; break; default: break; } return CURLE_OK; } CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info, double* value) override { switch (info) { case CURLINFO_SIZE_DOWNLOAD: *value = response_content_.size(); break; default: break; } return CURLE_OK; } void curl_easy_cleanup(CURL* curl) override { is_cleaned_up_ = true; } curl_slist* curl_slist_append(curl_slist* list, const char* str) override { std::vector<string>* v = list ? reinterpret_cast<std::vector<string>*>(list) : new std::vector<string>(); v->push_back(str); return reinterpret_cast<curl_slist*>(v); } char* curl_easy_escape(CURL* curl, const char* str, int length) override { const string victim = "/"; const string encoded = "%2F"; string temp_str = str; std::string::size_type n = 0; while ((n = temp_str.find(victim, n)) != std::string::npos) { temp_str.replace(n, victim.size(), encoded); n += encoded.size(); } char* out_char_str = reinterpret_cast<char*>( port::Malloc(sizeof(char) * temp_str.size() + 1)); std::copy(temp_str.begin(), temp_str.end(), out_char_str); out_char_str[temp_str.size()] = '\0'; return out_char_str; } void curl_slist_free_all(curl_slist* list) override { delete reinterpret_cast<std::vector<string>*>(list); } void curl_free(void* p) override { port::Free(p); } string response_content_; uint64 response_code_; std::vector<string> response_headers_; string url_; string range_; string custom_request_; string ca_info_; char* error_buffer_ = nullptr; bool is_initialized_ = false; bool 
is_cleaned_up_ = false; std::vector<string>* headers_ = nullptr; bool is_post_ = false; bool is_put_ = false; void* write_data_ = nullptr; size_t (*write_callback_)(const void* ptr, size_t size, size_t nmemb, void* userdata) = nullptr; void* header_data_ = nullptr; size_t (*header_callback_)(const void* ptr, size_t size, size_t nmemb, void* userdata) = nullptr; FILE* read_data_ = nullptr; size_t (*read_callback_)(void* ptr, size_t size, size_t nmemb, FILE* userdata) = &fread; int (*progress_callback_)(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) = nullptr; void* progress_data_ = nullptr; string posted_content_; CURLcode curl_easy_perform_result_ = CURLE_OK; string curl_easy_perform_error_message_; std::vector<std::tuple<uint64, curl_off_t>> progress_ticks_; FakeEnv* env_ = nullptr; }; TEST(CurlHttpRequestTest, GetRequest) { FakeLibCurl libcurl("get response", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end()); scratch.reserve(100); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); TF_EXPECT_OK(http_request.Send()); EXPECT_EQ("get response", string(scratch.begin(), scratch.end())); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("100-199", libcurl.range_); EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ("", libcurl.ca_info_); EXPECT_EQ(1, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_FALSE(libcurl.is_post_); EXPECT_EQ(200, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_Direct) { FakeLibCurl libcurl("get response", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch(100, 0); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); 
http_request.SetResultBufferDirect(scratch.data(), scratch.capacity()); TF_EXPECT_OK(http_request.Send()); string expected_response = "get response"; size_t response_bytes_transferred = http_request.GetResultBufferDirectBytesTransferred(); EXPECT_EQ(expected_response.size(), response_bytes_transferred); EXPECT_EQ( "get response", string(scratch.begin(), scratch.begin() + response_bytes_transferred)); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("100-199", libcurl.range_); EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ("", libcurl.ca_info_); EXPECT_EQ(1, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_FALSE(libcurl.is_post_); EXPECT_EQ(200, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_CustomCaInfoFlag) { static char set_var[] = "CURL_CA_BUNDLE=test"; putenv(set_var); FakeLibCurl libcurl("get response", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end()); scratch.reserve(100); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); TF_EXPECT_OK(http_request.Send()); EXPECT_EQ("get response", string(scratch.begin(), scratch.end())); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("100-199", libcurl.range_); EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ("test", libcurl.ca_info_); EXPECT_EQ(1, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_FALSE(libcurl.is_post_); EXPECT_EQ(200, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_Direct_ResponseTooLarge) { FakeLibCurl libcurl("get response", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch(5, 0); http_request.SetUri("http: http_request.SetResultBufferDirect(scratch.data(), scratch.size()); const absl::Status& 
status = http_request.Send(); EXPECT_EQ(error::FAILED_PRECONDITION, status.code()); EXPECT_EQ( "Error executing an HTTP request: libcurl code 23 meaning " "'Failed writing received data to disk/application', error details: " "Received 12 response bytes for a 5-byte buffer", status.message()); EXPECT_EQ(5, http_request.GetResultBufferDirectBytesTransferred()); EXPECT_EQ("get r", string(scratch.begin(), scratch.begin() + 5)); } TEST(CurlHttpRequestTest, GetRequest_Direct_RangeOutOfBound) { FakeLibCurl libcurl("get response", 416); CurlHttpRequest http_request(&libcurl); const string initialScratch = "abcde"; std::vector<char> scratch; scratch.insert(scratch.end(), initialScratch.begin(), initialScratch.end()); http_request.SetUri("http: http_request.SetRange(0, 4); http_request.SetResultBufferDirect(scratch.data(), scratch.size()); TF_EXPECT_OK(http_request.Send()); EXPECT_EQ(416, http_request.GetResponseCode()); EXPECT_EQ(0, http_request.GetResultBufferDirectBytesTransferred()); EXPECT_EQ("get r", string(scratch.begin(), scratch.end())); } TEST(CurlHttpRequestTest, GetRequest_Empty) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.resize(0); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); TF_EXPECT_OK(http_request.Send()); EXPECT_TRUE(scratch.empty()); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("100-199", libcurl.range_); EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ(1, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_FALSE(libcurl.is_post_); EXPECT_EQ(200, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_RangeOutOfBound) { FakeLibCurl libcurl("get response", 416); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end()); 
http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); TF_EXPECT_OK(http_request.Send()); EXPECT_TRUE(scratch.empty()); EXPECT_EQ(416, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_503) { FakeLibCurl libcurl("get response", 503); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end()); http_request.SetUri("http: http_request.SetResultBuffer(&scratch); const auto& status = http_request.Send(); EXPECT_EQ(error::UNAVAILABLE, status.code()); EXPECT_EQ( "Error executing an HTTP request: HTTP response code 503 with body " "'get response'", status.message()); } TEST(CurlHttpRequestTest, GetRequest_HttpCode0) { FakeLibCurl libcurl("get response", 0); libcurl.curl_easy_perform_result_ = CURLE_OPERATION_TIMEDOUT; libcurl.curl_easy_perform_error_message_ = "Operation timed out"; CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end()); http_request.SetUri("http: const auto& status = http_request.Send(); EXPECT_EQ(error::UNAVAILABLE, status.code()); EXPECT_EQ( "Error executing an HTTP request: libcurl code 28 meaning " "'Timeout was reached', error details: Operation timed out", status.message()); EXPECT_EQ(0, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_CouldntResolveHost) { FakeLibCurl libcurl("get response", 0); libcurl.curl_easy_perform_result_ = CURLE_COULDNT_RESOLVE_HOST; libcurl.curl_easy_perform_error_message_ = "Could not resolve host 'metadata'"; CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end()); http_request.SetUri("http: const auto& status = http_request.Send(); EXPECT_EQ(error::FAILED_PRECONDITION, status.code()); EXPECT_EQ( absl::StrCat( "Error executing an HTTP 
request: libcurl code 6 meaning ", (kIsOpenSource ? "'Couldn't resolve host name', error details: " : "'Could not resolve hostname', error details: "), "Could not resolve host ", "'metadata'"), status.message()); EXPECT_EQ(0, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, GetRequest_SslBadCertfile) { FakeLibCurl libcurl("get response", 0); libcurl.curl_easy_perform_result_ = CURLE_SSL_CACERT_BADFILE; libcurl.curl_easy_perform_error_message_ = "error setting certificate verify locations:"; CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end()); http_request.SetUri("http: const auto& status = http_request.Send(); EXPECT_EQ(error::FAILED_PRECONDITION, status.code()); EXPECT_EQ( "Error executing an HTTP request: libcurl code 77 meaning " "'Problem with the SSL CA cert (path? access rights?)', error details: " "error setting certificate verify locations:", status.message()); EXPECT_EQ(0, http_request.GetResponseCode()); } TEST(CurlHttpRequestTest, ResponseHeaders) { FakeLibCurl libcurl( "get response", 200, {"Location: abcd", "Content-Type: text", "unparsable header"}); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: TF_EXPECT_OK(http_request.Send()); EXPECT_EQ("abcd", http_request.GetResponseHeader("Location")); EXPECT_EQ("text", http_request.GetResponseHeader("Content-Type")); EXPECT_EQ("", http_request.GetResponseHeader("Not-Seen-Header")); } TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); auto content_filename = io::JoinPath(testing::TmpDir(), "content"); std::ofstream content(content_filename, std::ofstream::binary); content << "post body content"; content.close(); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 0)); TF_EXPECT_OK(http_request.Send()); 
EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ(2, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]); EXPECT_TRUE(libcurl.is_put_); EXPECT_EQ("post body content", libcurl.posted_content_); std::remove(content_filename.c_str()); } TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile_NonZeroOffset) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); auto content_filename = io::JoinPath(testing::TmpDir(), "content"); std::ofstream content(content_filename, std::ofstream::binary); content << "post body content"; content.close(); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 7)); TF_EXPECT_OK(http_request.Send()); EXPECT_EQ("dy content", libcurl.posted_content_); std::remove(content_filename.c_str()); } TEST(CurlHttpRequestTest, PutRequest_WithoutBody) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetPutEmptyBody(); TF_EXPECT_OK(http_request.Send()); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ(3, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]); EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]); EXPECT_TRUE(libcurl.is_put_); EXPECT_EQ("", libcurl.posted_content_); } TEST(CurlHttpRequestTest, PostRequest_WithBody_FromMemory) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); string content = "post body content"; http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetPostFromBuffer(content.c_str(), content.size()); TF_EXPECT_OK(http_request.Send()); 
EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ(2, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]); EXPECT_TRUE(libcurl.is_post_); EXPECT_EQ("post body content", libcurl.posted_content_); } TEST(CurlHttpRequestTest, PostRequest_WithoutBody) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetPostEmptyBody(); TF_EXPECT_OK(http_request.Send()); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("", libcurl.custom_request_); EXPECT_EQ(3, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]); EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]); EXPECT_TRUE(libcurl.is_post_); EXPECT_EQ("", libcurl.posted_content_); } TEST(CurlHttpRequestTest, DeleteRequest) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetDeleteRequest(); TF_EXPECT_OK(http_request.Send()); EXPECT_TRUE(libcurl.is_initialized_); EXPECT_EQ("http: EXPECT_EQ("DELETE", libcurl.custom_request_); EXPECT_EQ(1, libcurl.headers_->size()); EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]); EXPECT_FALSE(libcurl.is_post_); } TEST(CurlHttpRequestTest, WrongSequenceOfCalls_NoUri) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); ASSERT_DEATH((void)http_request.Send(), "URI has not been set"); } TEST(CurlHttpRequestTest, WrongSequenceOfCalls_TwoSends) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: TF_EXPECT_OK(http_request.Send()); ASSERT_DEATH((void)http_request.Send(), "The request has already been sent"); } 
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_ReusingAfterSend) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetUri("http: TF_EXPECT_OK(http_request.Send()); ASSERT_DEATH(http_request.SetUri("http: "The request has already been sent"); } TEST(CurlHttpRequestTest, WrongSequenceOfCalls_SettingMethodTwice) { FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetDeleteRequest(); ASSERT_DEATH(http_request.SetPostEmptyBody(), "HTTP method has been already set"); } TEST(CurlHttpRequestTest, EscapeString) { FakeLibCurl libcurl("get response", 200); CurlHttpRequest http_request(&libcurl); const string test_string = "a/b/c"; EXPECT_EQ("a%2Fb%2Fc", http_request.EscapeString(test_string)); } TEST(CurlHttpRequestTest, ErrorReturnsNoResponse) { FakeLibCurl libcurl("get response", 500); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end()); scratch.reserve(100); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); EXPECT_EQ(error::UNAVAILABLE, http_request.Send().code()); EXPECT_EQ("", string(scratch.begin(), scratch.end())); } TEST(CurlHttpRequestTest, ProgressIsOk) { FakeEnv env; FakeLibCurl libcurl( "test", 200, { std::make_tuple(100, 0) , std::make_tuple(110, 0) , std::make_tuple(200, 100) }, &env); CurlHttpRequest http_request(&libcurl, &env); http_request.SetUri("http: TF_EXPECT_OK(http_request.Send()); } TEST(CurlHttpRequestTest, ProgressIsStuck) { FakeEnv env; FakeLibCurl libcurl( "test", 200, { std::make_tuple(100, 10) , std::make_tuple(130, 10) , std::make_tuple(170, 10) }, &env); CurlHttpRequest http_request(&libcurl, &env); http_request.SetUri("http: auto status = http_request.Send(); EXPECT_EQ(error::UNAVAILABLE, status.code()); EXPECT_EQ( "Error executing an HTTP request: libcurl code 42 meaning 'Operation " "was 
aborted by an application callback', error details: (none)", status.message()); } class TestStats : public HttpRequest::RequestStats { public: ~TestStats() override = default; void RecordRequest(const HttpRequest* request, const string& uri, HttpRequest::RequestMethod method) override { has_recorded_request_ = true; record_request_request_ = request; record_request_uri_ = uri; record_request_method_ = method; } void RecordResponse(const HttpRequest* request, const string& uri, HttpRequest::RequestMethod method, const absl::Status& result) override { has_recorded_response_ = true; record_response_request_ = request; record_response_uri_ = uri; record_response_method_ = method; record_response_result_ = result; } const HttpRequest* record_request_request_ = nullptr; string record_request_uri_ = "http: HttpRequest::RequestMethod record_request_method_ = HttpRequest::RequestMethod::kGet; const HttpRequest* record_response_request_ = nullptr; string record_response_uri_ = "http: HttpRequest::RequestMethod record_response_method_ = HttpRequest::RequestMethod::kGet; absl::Status record_response_result_; bool has_recorded_request_ = false; bool has_recorded_response_ = false; }; class StatsTestFakeLibCurl : public FakeLibCurl { public: StatsTestFakeLibCurl(TestStats* stats, const string& response_content, uint64 response_code) : FakeLibCurl(response_content, response_code), stats_(stats) {} CURLcode curl_easy_perform(CURL* curl) override { CHECK(!performed_request_); performed_request_ = true; stats_had_recorded_request_ = stats_->has_recorded_request_; stats_had_recorded_response_ = stats_->has_recorded_response_; return FakeLibCurl::curl_easy_perform(curl); }; TestStats* stats_; bool performed_request_ = false; bool stats_had_recorded_request_; bool stats_had_recorded_response_; }; TEST(CurlHttpRequestTest, StatsGetSuccessful) { TestStats stats; StatsTestFakeLibCurl libcurl(&stats, "get response", 200); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; 
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end()); scratch.reserve(100); http_request.SetRequestStats(&stats); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); TF_EXPECT_OK(http_request.Send()); EXPECT_EQ("get response", string(scratch.begin(), scratch.end())); ASSERT_TRUE(stats.has_recorded_request_); EXPECT_EQ(&http_request, stats.record_request_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_); ASSERT_TRUE(stats.has_recorded_response_); EXPECT_EQ(&http_request, stats.record_response_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_); TF_EXPECT_OK(stats.record_response_result_); EXPECT_TRUE(libcurl.performed_request_); EXPECT_TRUE(libcurl.stats_had_recorded_request_); EXPECT_FALSE(libcurl.stats_had_recorded_response_); } TEST(CurlHttpRequestTest, StatsGetNotFound) { TestStats stats; StatsTestFakeLibCurl libcurl(&stats, "get other response", 404); CurlHttpRequest http_request(&libcurl); std::vector<char> scratch; scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end()); scratch.reserve(100); http_request.SetRequestStats(&stats); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetRange(100, 199); http_request.SetResultBuffer(&scratch); absl::Status s = http_request.Send(); ASSERT_TRUE(stats.has_recorded_request_); EXPECT_EQ(&http_request, stats.record_request_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_); ASSERT_TRUE(stats.has_recorded_response_); EXPECT_EQ(&http_request, stats.record_response_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_); EXPECT_TRUE(absl::IsNotFound(stats.record_response_result_)); EXPECT_EQ(s, stats.record_response_result_); 
EXPECT_TRUE(libcurl.performed_request_); EXPECT_TRUE(libcurl.stats_had_recorded_request_); EXPECT_FALSE(libcurl.stats_had_recorded_response_); } TEST(CurlHttpRequestTest, StatsPost) { TestStats stats; FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetRequestStats(&stats); string content = "post body content"; http_request.SetUri("http: http_request.SetPostFromBuffer(content.c_str(), content.size()); TF_EXPECT_OK(http_request.Send()); ASSERT_TRUE(stats.has_recorded_request_); EXPECT_EQ(&http_request, stats.record_request_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_request_method_); ASSERT_TRUE(stats.has_recorded_response_); EXPECT_EQ(&http_request, stats.record_response_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_response_method_); TF_EXPECT_OK(stats.record_response_result_); } TEST(CurlHttpRequestTest, StatsDelete) { TestStats stats; FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetRequestStats(&stats); http_request.SetUri("http: http_request.SetDeleteRequest(); TF_EXPECT_OK(http_request.Send()); ASSERT_TRUE(stats.has_recorded_request_); EXPECT_EQ(&http_request, stats.record_request_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_request_method_); ASSERT_TRUE(stats.has_recorded_response_); EXPECT_EQ(&http_request, stats.record_response_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_response_method_); TF_EXPECT_OK(stats.record_response_result_); } TEST(CurlHttpRequestTest, StatsPut) { TestStats stats; FakeLibCurl libcurl("", 200); CurlHttpRequest http_request(&libcurl); http_request.SetRequestStats(&stats); http_request.SetUri("http: http_request.AddAuthBearerHeader("fake-bearer"); http_request.SetPutEmptyBody(); TF_EXPECT_OK(http_request.Send()); ASSERT_TRUE(stats.has_recorded_request_); EXPECT_EQ(&http_request, 
stats.record_request_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_request_method_); ASSERT_TRUE(stats.has_recorded_response_); EXPECT_EQ(&http_request, stats.record_response_request_); EXPECT_EQ("http: EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_response_method_); TF_EXPECT_OK(stats.record_response_result_); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
37b9278e-0443-49f5-b552-6a82d777f3ab
cpp
tensorflow/tensorflow
oauth_client
third_party/xla/third_party/tsl/tsl/platform/cloud/oauth_client.cc
third_party/xla/third_party/tsl/tsl/platform/cloud/oauth_client_test.cc
#include "tsl/platform/cloud/oauth_client.h" #ifndef _WIN32 #include <pwd.h> #include <sys/types.h> #include <unistd.h> #else #include <sys/types.h> #endif #include <fstream> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> #include <openssl/rsa.h> #include "tsl/platform/base64.h" #include "tsl/platform/cloud/curl_http_request.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" namespace tsl { namespace { constexpr int kRequestedTokenLifetimeSec = 3600; constexpr char kCryptoAlgorithm[] = "RS256"; constexpr char kJwtType[] = "JWT"; constexpr char kGrantType[] = "urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer"; absl::Status ReadJsonValue(const Json::Value& json, const string& name, Json::Value* value) { if (!value) { return errors::FailedPrecondition("'value' cannot be nullptr."); } *value = json.get(name, Json::Value::null); if (*value == Json::Value::null) { return errors::FailedPrecondition( strings::StrCat("Couldn't read a JSON value '", name, "'.")); } return absl::OkStatus(); } absl::Status ReadJsonString(const Json::Value& json, const string& name, string* value) { Json::Value json_value; TF_RETURN_IF_ERROR(ReadJsonValue(json, name, &json_value)); if (!json_value.isString()) { return errors::FailedPrecondition( strings::StrCat("JSON value '", name, "' is not string.")); } *value = json_value.asString(); return absl::OkStatus(); } absl::Status ReadJsonInt(const Json::Value& json, const string& name, int64_t* value) { Json::Value json_value; TF_RETURN_IF_ERROR(ReadJsonValue(json, name, &json_value)); if (!json_value.isIntegral()) { return errors::FailedPrecondition( strings::StrCat("JSON value '", name, "' is not integer.")); } *value = json_value.asInt64(); return absl::OkStatus(); } absl::Status CreateSignature(RSA* private_key, absl::string_view to_sign, string* signature) { if (!private_key || !signature) { return errors::FailedPrecondition( "'private_key' and 'signature' cannot be nullptr."); } const auto md = 
EVP_sha256(); if (!md) { return errors::Internal("Could not get a sha256 encryptor."); } std::unique_ptr<EVP_MD_CTX, std::function<void(EVP_MD_CTX*)>> md_ctx( EVP_MD_CTX_create(), [](EVP_MD_CTX* ptr) { EVP_MD_CTX_destroy(ptr); }); if (!md_ctx) { return errors::Internal("Could not create MD_CTX."); } std::unique_ptr<EVP_PKEY, std::function<void(EVP_PKEY*)>> key( EVP_PKEY_new(), [](EVP_PKEY* ptr) { EVP_PKEY_free(ptr); }); EVP_PKEY_set1_RSA(key.get(), private_key); if (EVP_DigestSignInit(md_ctx.get(), nullptr, md, nullptr, key.get()) != 1) { return errors::Internal("DigestInit failed."); } if (EVP_DigestSignUpdate(md_ctx.get(), to_sign.data(), to_sign.size()) != 1) { return errors::Internal("DigestUpdate failed."); } size_t sig_len = 0; if (EVP_DigestSignFinal(md_ctx.get(), nullptr, &sig_len) != 1) { return errors::Internal("DigestFinal (get signature length) failed."); } std::unique_ptr<unsigned char[]> sig(new unsigned char[sig_len]); if (EVP_DigestSignFinal(md_ctx.get(), sig.get(), &sig_len) != 1) { return errors::Internal("DigestFinal (signature compute) failed."); } return Base64Encode( absl::string_view(reinterpret_cast<char*>(sig.get()), sig_len), signature); } absl::Status EncodeJwtClaim(absl::string_view client_email, absl::string_view scope, absl::string_view audience, uint64 request_timestamp_sec, string* encoded) { Json::Value root; root["iss"] = Json::Value(client_email.data(), client_email.data() + client_email.size()); root["scope"] = Json::Value(scope.data(), scope.data() + scope.size()); root["aud"] = Json::Value(audience.data(), audience.data() + audience.size()); const auto expiration_timestamp_sec = request_timestamp_sec + kRequestedTokenLifetimeSec; root["iat"] = Json::Value::UInt64(request_timestamp_sec); root["exp"] = Json::Value::UInt64(expiration_timestamp_sec); string claim = root.toStyledString(); return Base64Encode(claim, encoded); } absl::Status EncodeJwtHeader(absl::string_view key_id, string* encoded) { Json::Value root; root["alg"] = 
kCryptoAlgorithm; root["typ"] = kJwtType; root["kid"] = Json::Value(key_id.data(), key_id.data() + key_id.size()); const string header = root.toStyledString(); return Base64Encode(header, encoded); } } OAuthClient::OAuthClient() : OAuthClient( std::unique_ptr<HttpRequest::Factory>(new CurlHttpRequest::Factory()), Env::Default()) {} OAuthClient::OAuthClient( std::unique_ptr<HttpRequest::Factory> http_request_factory, Env* env) : http_request_factory_(std::move(http_request_factory)), env_(env) {} absl::Status OAuthClient::GetTokenFromServiceAccountJson( Json::Value json, absl::string_view oauth_server_uri, absl::string_view scope, string* token, uint64* expiration_timestamp_sec) { if (!token || !expiration_timestamp_sec) { return errors::FailedPrecondition( "'token' and 'expiration_timestamp_sec' cannot be nullptr."); } string private_key_serialized, private_key_id, client_id, client_email; TF_RETURN_IF_ERROR( ReadJsonString(json, "private_key", &private_key_serialized)); TF_RETURN_IF_ERROR(ReadJsonString(json, "private_key_id", &private_key_id)); TF_RETURN_IF_ERROR(ReadJsonString(json, "client_id", &client_id)); TF_RETURN_IF_ERROR(ReadJsonString(json, "client_email", &client_email)); std::unique_ptr<BIO, std::function<void(BIO*)>> bio( BIO_new(BIO_s_mem()), [](BIO* ptr) { BIO_free_all(ptr); }); if (BIO_puts(bio.get(), private_key_serialized.c_str()) != static_cast<int>(private_key_serialized.size())) { return errors::Internal("Could not load the private key."); } std::unique_ptr<RSA, std::function<void(RSA*)>> private_key( PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, nullptr, nullptr), [](RSA* ptr) { RSA_free(ptr); }); if (!private_key) { return errors::Internal("Could not deserialize the private key."); } const uint64 request_timestamp_sec = env_->NowSeconds(); string encoded_claim, encoded_header; TF_RETURN_IF_ERROR(EncodeJwtHeader(private_key_id, &encoded_header)); TF_RETURN_IF_ERROR(EncodeJwtClaim(client_email, scope, oauth_server_uri, request_timestamp_sec, 
&encoded_claim)); const string to_sign = encoded_header + "." + encoded_claim; string signature; TF_RETURN_IF_ERROR(CreateSignature(private_key.get(), to_sign, &signature)); const string jwt = to_sign + "." + signature; const string request_body = strings::StrCat("grant_type=", kGrantType, "&assertion=", jwt); std::unique_ptr<HttpRequest> request(http_request_factory_->Create()); std::vector<char> response_buffer; request->SetUri(string(oauth_server_uri)); request->SetPostFromBuffer(request_body.c_str(), request_body.size()); request->SetResultBuffer(&response_buffer); TF_RETURN_IF_ERROR(request->Send()); absl::string_view response = absl::string_view(response_buffer.data(), response_buffer.size()); TF_RETURN_IF_ERROR(ParseOAuthResponse(response, request_timestamp_sec, token, expiration_timestamp_sec)); return absl::OkStatus(); } absl::Status OAuthClient::GetTokenFromRefreshTokenJson( Json::Value json, absl::string_view oauth_server_uri, string* token, uint64* expiration_timestamp_sec) { if (!token || !expiration_timestamp_sec) { return errors::FailedPrecondition( "'token' and 'expiration_timestamp_sec' cannot be nullptr."); } string client_id, client_secret, refresh_token; TF_RETURN_IF_ERROR(ReadJsonString(json, "client_id", &client_id)); TF_RETURN_IF_ERROR(ReadJsonString(json, "client_secret", &client_secret)); TF_RETURN_IF_ERROR(ReadJsonString(json, "refresh_token", &refresh_token)); const auto request_body = strings::StrCat( "client_id=", client_id, "&client_secret=", client_secret, "&refresh_token=", refresh_token, "&grant_type=refresh_token"); const uint64 request_timestamp_sec = env_->NowSeconds(); std::unique_ptr<HttpRequest> request(http_request_factory_->Create()); std::vector<char> response_buffer; request->SetUri(string(oauth_server_uri)); request->SetPostFromBuffer(request_body.c_str(), request_body.size()); request->SetResultBuffer(&response_buffer); TF_RETURN_IF_ERROR(request->Send()); absl::string_view response = 
absl::string_view(response_buffer.data(), response_buffer.size()); TF_RETURN_IF_ERROR(ParseOAuthResponse(response, request_timestamp_sec, token, expiration_timestamp_sec)); return absl::OkStatus(); } absl::Status OAuthClient::ParseOAuthResponse(absl::string_view response, uint64 request_timestamp_sec, string* token, uint64* expiration_timestamp_sec) { if (!token || !expiration_timestamp_sec) { return errors::FailedPrecondition( "'token' and 'expiration_timestamp_sec' cannot be nullptr."); } Json::Value root; Json::Reader reader; if (!reader.parse(response.data(), response.data() + response.size(), root)) { return errors::Internal("Couldn't parse JSON response from OAuth server."); } string token_type; TF_RETURN_IF_ERROR(ReadJsonString(root, "token_type", &token_type)); if (token_type != "Bearer") { return errors::FailedPrecondition("Unexpected Oauth token type: " + token_type); } int64_t expires_in = 0; TF_RETURN_IF_ERROR(ReadJsonInt(root, "expires_in", &expires_in)); *expiration_timestamp_sec = request_timestamp_sec + expires_in; TF_RETURN_IF_ERROR(ReadJsonString(root, "access_token", token)); return absl::OkStatus(); } }
#include "tsl/platform/cloud/oauth_client.h" #include <fstream> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/base64.h" #include "tsl/platform/cloud/http_request_fake.h" #include "tsl/platform/env.h" #include "tsl/platform/path.h" #include "tsl/platform/scanner.h" #include "tsl/platform/test.h" namespace tsl { namespace { string TestData() { return io::JoinPath(testing::TslSrcRoot(), "platform", "cloud", "testdata"); } constexpr char kTokenJson[] = R"( { "access_token":"WITH_FAKE_ACCESS_TOKEN_TEST_SHOULD_BE_HAPPY", "expires_in":3920, "token_type":"Bearer" })"; class FakeEnv : public EnvWrapper { public: FakeEnv() : EnvWrapper(Env::Default()) {} uint64 NowSeconds() const override { return now; } uint64 now = 10000; }; } TEST(OAuthClientTest, ParseOAuthResponse) { const uint64 request_timestamp = 100; string token; uint64 expiration_timestamp; TF_EXPECT_OK(OAuthClient().ParseOAuthResponse(kTokenJson, request_timestamp, &token, &expiration_timestamp)); EXPECT_EQ("WITH_FAKE_ACCESS_TOKEN_TEST_SHOULD_BE_HAPPY", token); EXPECT_EQ(4020, expiration_timestamp); } TEST(OAuthClientTest, GetTokenFromRefreshTokenJson) { const string credentials_json = R"( { "client_id": "test_client_id", "client_secret": "@@@test_client_secret@@@", "refresh_token": "test_refresh_token", "type": "authorized_user" })"; Json::Value json; Json::Reader reader; ASSERT_TRUE(reader.parse(credentials_json, json)); std::vector<HttpRequest*> requests({new FakeHttpRequest( "Uri: https: "Post body: client_id=test_client_id&" "client_secret=@@@test_client_secret@@@&" "refresh_token=test_refresh_token&grant_type=refresh_token\n", kTokenJson)}); FakeEnv env; OAuthClient client(std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), &env); string token; uint64 expiration_timestamp; TF_EXPECT_OK(client.GetTokenFromRefreshTokenJson( json, "https: &expiration_timestamp)); 
EXPECT_EQ("WITH_FAKE_ACCESS_TOKEN_TEST_SHOULD_BE_HAPPY", token); EXPECT_EQ(13920, expiration_timestamp); } TEST(OAuthClientTest, GetTokenFromServiceAccountJson) { std::ifstream credentials( io::JoinPath(TestData(), "service_account_credentials.json")); ASSERT_TRUE(credentials.is_open()); Json::Value json; Json::Reader reader; ASSERT_TRUE(reader.parse(credentials, json)); string post_body; std::vector<HttpRequest*> requests( {new FakeHttpRequest("Uri: https: kTokenJson, &post_body)}); FakeEnv env; OAuthClient client(std::unique_ptr<HttpRequest::Factory>( new FakeHttpRequestFactory(&requests)), &env); string token; uint64 expiration_timestamp; TF_EXPECT_OK(client.GetTokenFromServiceAccountJson( json, "https: "https: EXPECT_EQ("WITH_FAKE_ACCESS_TOKEN_TEST_SHOULD_BE_HAPPY", token); EXPECT_EQ(13920, expiration_timestamp); absl::string_view grant_type, assertion; ASSERT_TRUE(strings::Scanner(post_body) .OneLiteral("grant_type=") .RestartCapture() .ScanEscapedUntil('&') .StopCapture() .OneLiteral("&assertion=") .GetResult(&assertion, &grant_type)); EXPECT_EQ("urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer", grant_type); int last_dot = assertion.rfind('.'); string header_dot_claim(assertion.substr(0, last_dot)); string signature_encoded(assertion.substr(last_dot + 1)); std::ifstream public_key_stream( io::JoinPath(TestData(), "service_account_public_key.txt")); string public_key_serialized( (std::istreambuf_iterator<char>(public_key_stream)), (std::istreambuf_iterator<char>())); auto bio = BIO_new(BIO_s_mem()); RSA* public_key = nullptr; EXPECT_EQ(public_key_serialized.size(), BIO_puts(bio, public_key_serialized.c_str())); public_key = PEM_read_bio_RSA_PUBKEY(bio, nullptr, nullptr, nullptr); EXPECT_TRUE(public_key) << "Could not load the public key from testdata."; string signature; TF_EXPECT_OK(Base64Decode(signature_encoded, &signature)); const auto md = EVP_sha256(); auto md_ctx = EVP_MD_CTX_create(); auto key = EVP_PKEY_new(); EVP_PKEY_set1_RSA(key, public_key); 
ASSERT_EQ(1, EVP_DigestVerifyInit(md_ctx, nullptr, md, nullptr, key)); ASSERT_EQ(1, EVP_DigestVerifyUpdate(md_ctx, header_dot_claim.c_str(), header_dot_claim.size())); ASSERT_EQ(1, EVP_DigestVerifyFinal( md_ctx, const_cast<unsigned char*>( reinterpret_cast<const unsigned char*>(signature.data())), signature.size())); EVP_PKEY_free(key); EVP_MD_CTX_destroy(md_ctx); RSA_free(public_key); BIO_free_all(bio); int dot = header_dot_claim.find_last_of('.'); string header_encoded = header_dot_claim.substr(0, dot); string claim_encoded = header_dot_claim.substr(dot + 1); string header, claim; TF_EXPECT_OK(Base64Decode(header_encoded, &header)); TF_EXPECT_OK(Base64Decode(claim_encoded, &claim)); Json::Value header_json, claim_json; EXPECT_TRUE(reader.parse(header, header_json)); EXPECT_EQ("RS256", header_json.get("alg", Json::Value::null).asString()); EXPECT_EQ("JWT", header_json.get("typ", Json::Value::null).asString()); EXPECT_EQ("fake_key_id", header_json.get("kid", Json::Value::null).asString()); EXPECT_TRUE(reader.parse(claim, claim_json)); EXPECT_EQ("fake-test-project.iam.gserviceaccount.com", claim_json.get("iss", Json::Value::null).asString()); EXPECT_EQ("https: claim_json.get("scope", Json::Value::null).asString()); EXPECT_EQ("https: claim_json.get("aud", Json::Value::null).asString()); EXPECT_EQ(10000, claim_json.get("iat", Json::Value::null).asInt64()); EXPECT_EQ(13600, claim_json.get("exp", Json::Value::null).asInt64()); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/oauth_client.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/oauth_client_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a5a37f0d-aeff-4608-bde1-db87689d2a23
cpp
tensorflow/tensorflow
stacktrace
third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace.cc
third_party/xla/third_party/tsl/tsl/platform/stacktrace_test.cc
#include "tsl/platform/windows/stacktrace.h" #include <windows.h> #include <dbghelp.h> #include <string> #include "tsl/platform/mutex.h" #pragma comment(lib, "dbghelp.lib") namespace tsl { static bool SymbolsAreAvailableInit() { SymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS); return SymInitialize(GetCurrentProcess(), NULL, true); } static bool SymbolsAreAvailable() { static bool kSymbolsAvailable = SymbolsAreAvailableInit(); return kSymbolsAvailable; } std::string CurrentStackTrace() { HANDLE current_process = GetCurrentProcess(); static constexpr int kMaxStackFrames = 64; void* trace[kMaxStackFrames]; int num_frames = CaptureStackBackTrace(0, kMaxStackFrames, trace, NULL); static mutex mu(tsl::LINKER_INITIALIZED); std::string stacktrace; for (int i = 0; i < num_frames; ++i) { const char* symbol = "(unknown)"; if (SymbolsAreAvailable()) { char symbol_info_buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(TCHAR)]; SYMBOL_INFO* symbol_ptr = reinterpret_cast<SYMBOL_INFO*>(symbol_info_buffer); symbol_ptr->SizeOfStruct = sizeof(SYMBOL_INFO); symbol_ptr->MaxNameLen = MAX_SYM_NAME; mutex_lock lock(mu); if (SymFromAddr(current_process, reinterpret_cast<DWORD64>(trace[i]), 0, symbol_ptr)) { symbol = symbol_ptr->Name; } } char buffer[256]; snprintf(buffer, sizeof(buffer), "0x%p\t%s", trace[i], symbol); stacktrace += buffer; stacktrace += "\n"; } return stacktrace; } }
#include "tsl/platform/stacktrace.h" #include <string> #include "tsl/platform/logging.h" #include "tsl/platform/test.h" namespace tsl { namespace { #if defined(TF_HAS_STACKTRACE) TEST(StacktraceTest, StacktraceWorks) { std::string stacktrace = CurrentStackTrace(); LOG(INFO) << "CurrentStackTrace():\n" << stacktrace; std::string expected_frame = "testing::internal::UnitTestImpl::RunAllTests"; EXPECT_NE(stacktrace.find(expected_frame), std::string::npos); } #endif } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stacktrace_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b31eb974-d584-438f-959b-88eafc6895b6
cpp
tensorflow/tensorflow
profiler_factory
third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory.cc
third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc
#include "tsl/profiler/lib/profiler_factory.h" #include <memory> #include <utility> #include <vector> #include "tsl/platform/mutex.h" #include "tsl/profiler/lib/profiler_controller.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" namespace tsl { namespace profiler { namespace { mutex mu(LINKER_INITIALIZED); std::vector<ProfilerFactory>* GetFactories() { static auto factories = new std::vector<ProfilerFactory>(); return factories; } } void RegisterProfilerFactory(ProfilerFactory factory) { mutex_lock lock(mu); GetFactories()->push_back(std::move(factory)); } std::vector<std::unique_ptr<profiler::ProfilerInterface>> CreateProfilers( const tensorflow::ProfileOptions& options) { std::vector<std::unique_ptr<profiler::ProfilerInterface>> result; mutex_lock lock(mu); for (const auto& factory : *GetFactories()) { auto profiler = factory(options); if (profiler == nullptr) continue; result.emplace_back( std::make_unique<ProfilerController>(std::move(profiler))); } return result; } void ClearRegisteredProfilersForTest() { mutex_lock lock(mu); GetFactories()->clear(); } } }
#include "tsl/profiler/lib/profiler_factory.h" #include <functional> #include <utility> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tsl/platform/macros.h" #include "tsl/platform/test.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" #include "tsl/profiler/protobuf/xplane.pb.h" namespace tsl { namespace profiler { namespace { class TestProfiler : public ProfilerInterface { public: absl::Status Start() override { return absl::OkStatus(); } absl::Status Stop() override { return absl::OkStatus(); } absl::Status CollectData(tensorflow::profiler::XSpace*) override { return absl::OkStatus(); } }; std::unique_ptr<ProfilerInterface> TestFactoryFunction( const tensorflow::ProfileOptions& options) { return absl::make_unique<TestProfiler>(); } TEST(ProfilerFactoryTest, FactoryFunctionPointer) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&TestFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } TEST(ProfilerFactoryTest, FactoryLambda) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) { return absl::make_unique<TestProfiler>(); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } std::unique_ptr<ProfilerInterface> NullFactoryFunction( const tensorflow::ProfileOptions& options) { return nullptr; } TEST(ProfilerFactoryTest, FactoryReturnsNull) { ClearRegisteredProfilersForTest(); RegisterProfilerFactory(&NullFactoryFunction); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_TRUE(profilers.empty()); } class FactoryClass { public: explicit FactoryClass(void* ptr) : ptr_(ptr) {} FactoryClass(const FactoryClass&) = default; FactoryClass(FactoryClass&&) = default; std::unique_ptr<ProfilerInterface> CreateProfiler( const tensorflow::ProfileOptions& options) const { return 
absl::make_unique<TestProfiler>(); } private: void* ptr_ TF_ATTRIBUTE_UNUSED = nullptr; }; TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) { ClearRegisteredProfilersForTest(); static int token = 42; FactoryClass factory(&token); RegisterProfilerFactory([factory = std::move(factory)]( const tensorflow::ProfileOptions& options) { return factory.CreateProfiler(options); }); auto profilers = CreateProfilers(tensorflow::ProfileOptions()); EXPECT_EQ(profilers.size(), 1); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
10d27e5d-535e-4d4c-b8ea-82d4c765fccf
cpp
tensorflow/tensorflow
profiler_lock
third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc
third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc
#include "tsl/profiler/lib/profiler_lock.h" #include <atomic> #include "absl/status/statusor.h" #include "xla/tsl/util/env_var.h" #include "tsl/platform/errors.h" #include "tsl/platform/macros.h" namespace tsl { namespace profiler { namespace { std::atomic<int> g_session_active = ATOMIC_VAR_INIT(0); static_assert(ATOMIC_INT_LOCK_FREE == 2, "Assumed atomic<int> was lock free"); } bool ProfilerLock::HasActiveSession() { return g_session_active.load(std::memory_order_relaxed) != 0; } absl::StatusOr<ProfilerLock> ProfilerLock::Acquire() { static bool tf_profiler_disabled = [] { bool disabled = false; ReadBoolFromEnvVar("TF_DISABLE_PROFILING", false, &disabled).IgnoreError(); return disabled; }(); if (TF_PREDICT_FALSE(tf_profiler_disabled)) { return errors::AlreadyExists( "TensorFlow Profiler is permanently disabled by env var " "TF_DISABLE_PROFILING."); } int already_active = g_session_active.exchange(1, std::memory_order_acq_rel); if (already_active) { return errors::AlreadyExists(kProfilerLockContention); } return ProfilerLock(true); } void ProfilerLock::ReleaseIfActive() { if (active_) { g_session_active.store(0, std::memory_order_release); active_ = false; } } } }
#include "tsl/profiler/lib/profiler_lock.h" #include <utility> #include "absl/status/statusor.h" #include "tsl/platform/test.h" namespace tsl { namespace profiler { namespace { TEST(ProfilerLockTest, DefaultConstructorCreatesInactiveInstance) { ProfilerLock profiler_lock; EXPECT_FALSE(profiler_lock.Active()); } TEST(ProfilerLockTest, AcquireAndReleaseExplicitly) { absl::StatusOr<ProfilerLock> profiler_lock = ProfilerLock::Acquire(); ASSERT_TRUE(profiler_lock.ok()); EXPECT_TRUE(profiler_lock->Active()); profiler_lock->ReleaseIfActive(); EXPECT_FALSE(profiler_lock->Active()); } TEST(ProfilerLockTest, AcquireAndReleaseOnDestruction) { absl::StatusOr<ProfilerLock> profiler_lock = ProfilerLock::Acquire(); ASSERT_TRUE(profiler_lock.ok()); EXPECT_TRUE(profiler_lock->Active()); } TEST(ProfilerLockTest, ReacquireWithoutReleaseFails) { absl::StatusOr<ProfilerLock> profiler_lock_1 = ProfilerLock::Acquire(); absl::StatusOr<ProfilerLock> profiler_lock_2 = ProfilerLock::Acquire(); ASSERT_TRUE(profiler_lock_1.ok()); EXPECT_TRUE(profiler_lock_1->Active()); EXPECT_FALSE(profiler_lock_2.ok()); } TEST(ProfilerLockTest, ReacquireAfterReleaseSucceeds) { auto profiler_lock_1 = ProfilerLock::Acquire(); ASSERT_TRUE(profiler_lock_1.ok()); ASSERT_TRUE(profiler_lock_1->Active()); profiler_lock_1->ReleaseIfActive(); ASSERT_FALSE(profiler_lock_1->Active()); auto profiler_lock_2 = ProfilerLock::Acquire(); EXPECT_TRUE(profiler_lock_2.ok()); EXPECT_TRUE(profiler_lock_2->Active()); } TEST(ProfilerLockTest, InactiveAfterMove) { absl::StatusOr<ProfilerLock> profiler_lock_1 = ProfilerLock::Acquire(); ASSERT_TRUE(profiler_lock_1.ok()); ASSERT_TRUE(profiler_lock_1->Active()); ProfilerLock profiler_lock_2 = std::move(*profiler_lock_1); EXPECT_FALSE(profiler_lock_1->Active()); EXPECT_TRUE(profiler_lock_2.Active()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f923ab04-73b0-476e-b5d4-3c77edeea188
cpp
tensorflow/tensorflow
fft
tensorflow/lite/experimental/microfrontend/lib/fft.cc
third_party/xla/xla/tests/fft_test.cc
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h" #include <string.h> #define FIXED_POINT 16 #include "kiss_fft.h" #include "kiss_fftr.h" void FftCompute(struct FftState* state, const int16_t* input, int input_scale_shift) { const size_t input_size = state->input_size; const size_t fft_size = state->fft_size; int16_t* fft_input = state->input; size_t i; for (i = 0; i < input_size; ++i) { fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i]) << input_scale_shift); } for (; i < fft_size; ++i) { fft_input[i] = 0; } kiss_fftr(reinterpret_cast<kiss_fftr_cfg>(state->scratch), state->input, reinterpret_cast<kiss_fft_cpx*>(state->output)); } void FftInit(struct FftState* state) { } void FftReset(struct FftState* state) { memset(state->input, 0, state->fft_size * sizeof(*state->input)); memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output)); }
#include "absl/strings/string_view.h" #include "xla/error_spec.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "tsl/platform/test.h" namespace xla { namespace { class FftTextTest : public HloTestBase {}; XLA_TEST_F(FftTextTest, Fft) { absl::string_view hlo_string = R"( HloModule Fft_module ENTRY Fft { input = c64[8,32]{0,1} parameter(0) fft = c64[8,32] fft(input), fft_type=FFT, fft_length={32} ROOT transpose = c64[32,8] transpose(fft), dimensions={1,0} })"; EXPECT_TRUE(RunAndCompare(hlo_string, ErrorSpec{4e-3, 4e-3})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/lib/fft.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/fft_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
479417f4-59f9-4d2f-86c8-5736fa232d4f
cpp
tensorflow/tensorflow
spmd_expander
tensorflow/dtensor/mlir/spmd_expander.cc
tensorflow/dtensor/tests/spmd_expander_test.cc
#include "tensorflow/dtensor/mlir/spmd_expander.h" #include <climits> #include <cstdint> #include <iterator> #include <memory> #include <optional> #include <string> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/types/optional.h" #include "absl/types/span.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Casting.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Operation.h" #include "mlir/IR/OperationSupport.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/framework/registration/registration.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/dtensor_utils.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/mlir/expansions/replicated_spmd_expander.h" #include "tensorflow/dtensor/mlir/ir/tf_dtensor.h" #include "tensorflow/dtensor/mlir/layout_parsing.h" #include "tensorflow/dtensor/mlir/op_utils.h" #include "tensorflow/dtensor/mlir/shape_utils.h" #include "tensorflow/dtensor/proto/layout.pb.h" namespace tensorflow { namespace dtensor { namespace { Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts, llvm::DenseMap<int, Layout>* computed_layouts) { bool input_has_parted_layout = false; for (const auto& input_layout : input_layouts) { if (input_layout.second.type() == Layout::LayoutType::kParted) { input_has_parted_layout = true; break; } } if (input_has_parted_layout) { for (auto& computed_layout : *computed_layouts) { TF_ASSIGN_OR_RETURN(Layout parted, computed_layout.second.ToParted()); computed_layout.getSecond() = parted; } 
} return absl::OkStatus(); } bool SkipExpansionForPartedLayout(mlir::Operation* op) { if (llvm::isa<mlir::func::ReturnOp, mlir::tf_device::ReturnOp>(op)) { return false; } auto status_or_input_layouts = ExtractRequiredLayoutFromOperands(op); if (!status_or_input_layouts.ok()) { return false; } bool operand_uses_parted_layout = false; for (const auto& layout : status_or_input_layouts.value()) { if (layout.type() == Layout::LayoutType::kParted) { operand_uses_parted_layout = true; break; } } return operand_uses_parted_layout; } } SPMDExpanderRegistry* SPMDExpanderRegistry::Global() { static SPMDExpanderRegistry* registry = new SPMDExpanderRegistry(); return registry; } SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForFullOpName( const std::string& full_op_name) { auto key = full_op_name; auto fn = op_to_propagate_fn_map_.find(key); if (fn == op_to_propagate_fn_map_.end()) { if (EnableReplicatedSpmdAsDefault(key)) { LOG(WARNING) << full_op_name << " is defaulting to ReplicatedOpSPMDExpander. This " << " has performance implications as all inputs and outputs " << " will be replicated if they are not already. 
Please file a " << " feature request to TF DTensor to implement an efficient " << " SPMD for this operation."; RegisterPropagateFn(key, std::make_unique<ReplicatedOpSPMDExpander>( true)); return op_to_propagate_fn_map_.find(key)->second.get(); } else { return nullptr; } } return fn->second.get(); } SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForOp( mlir::Operation* op) { return GetPropagateFnForFullOpName(OpName(op)); } InitOnStartupMarker SPMDExpanderRegistry::RegisterPropagateFn( std::string opName, std::unique_ptr<SPMDExpanderBase> prop) { CHECK(op_to_propagate_fn_map_ .insert_or_assign(opName, std::move(prop)) .second); return {}; } Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op, mlir::Operation** output) { TF_ASSIGN_OR_RETURN(std::vector<std::optional<Layout>> computed_layout, ExtractLayoutFromOp(op)); if (computed_layout.empty() && op->getNumResults() != 0) { return errors::InvalidArgument( absl::StrCat("No attached layout found for op : ", OpName(op), " This might be due to an error in layout propagation.") .c_str()); } TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); bool skip_expansion_for_parted_layout = SkipExpansionForPartedLayout(op); if (mesh.IsSingleDevice() || mesh.use_xla_spmd() || skip_expansion_for_parted_layout) { if (skip_expansion_for_parted_layout) { *output = InferSPMDExpandedLocalShape(op); } else { *output = op; } SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>( computed_layout.data(), computed_layout.size())); return absl::OkStatus(); } llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> global_output_shapes; global_output_shapes.reserve(op->getNumResults()); for (auto output_value : op->getResults()) { auto maybe_ranked = mlir::dyn_cast<mlir::RankedTensorType>(output_value.getType()); if (llvm::isa<mlir::TF::RestoreV2Op, mlir::TF::DTensorRestoreV2Op>(op) && (!maybe_ranked || !maybe_ranked.hasStaticShape())) continue; TF_ASSIGN_OR_RETURN(auto global_shape, 
ExtractGlobalOutputShape(output_value)); global_output_shapes.emplace_back(llvm::SmallVector<int64_t, 4>{ global_shape.begin(), global_shape.end()}); } TF_ASSIGN_OR_RETURN(*output, this->ExpandOp(op)); SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>( computed_layout.data(), computed_layout.size())); for (const auto& output_layout_and_index : llvm::enumerate(llvm::zip((*output)->getResults(), computed_layout))) { const int index = output_layout_and_index.index(); const auto& output_and_layout = output_layout_and_index.value(); auto output_value = std::get<0>(output_and_layout); auto local_expanded_shape_or_status = GetShapeOfValue(output_value); if (!local_expanded_shape_or_status.ok()) continue; const auto local_expanded_shape = local_expanded_shape_or_status.value(); const auto& layout = std::get<1>(output_and_layout); const auto expected_global_shape = layout->GlobalShapeFromLocalShape(local_expanded_shape); for (const auto& expanded_and_true_global_shape : llvm::zip(global_output_shapes[index], expected_global_shape)) { const auto expanded_shape = std::get<0>(expanded_and_true_global_shape); const auto expected_shape = std::get<1>(expanded_and_true_global_shape); if (expanded_shape <= 0 || expected_shape <= 0) continue; if (expanded_shape != expected_shape) { return errors::Internal( "SPMD expansion resulted in op output inconsistent with the " "provided layout. 
Expected shape: <", absl::StrJoin(expected_global_shape, ","), "> got shape: <", absl::StrJoin(global_output_shapes[index], ","), ">"); } } } return absl::OkStatus(); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { return errors::Unimplemented( "ComputeLayoutForward API must be implemented via the subclass."); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts) { TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); if (mesh.IsSingleDevice()) { TF_ASSIGN_OR_RETURN( Layout layout, Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh)); auto layouts = llvm::DenseMap<int, Layout>{}; for (int i = 0; i < op->getNumResults(); ++i) { layouts.insert({i, layout}); } return layouts; } TF_ASSIGN_OR_RETURN(auto layouts, ComputeLayoutForward(op, input_layouts)); TF_RETURN_IF_ERROR(AdjustPartedLayout(input_layouts, &layouts)); return layouts; } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) { return errors::Unimplemented( "ComputeLayoutBackward API must be implemented via the subclass."); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts) { TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); if (mesh.IsSingleDevice()) { TF_ASSIGN_OR_RETURN( Layout layout, Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh)); auto layouts = llvm::DenseMap<int, Layout>{}; for (int i = 0; i < op->getNumOperands(); ++i) { layouts.insert({i, layout}); } return layouts; } return ComputeLayoutBackward(op, output_layouts); } Status 
RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) { SPMDExpanderBase* expander = SPMDExpanderRegistry::Global()->GetPropagateFnForOp(op); if (expander != nullptr) { return expander->ExpandOpAndSetLayout(op, output); } else { VLOG(1) << "No expansion found for " << OpName(op) << "\n"; *output = op; } return absl::OkStatus(); } } }
#include "tensorflow/dtensor/mlir/spmd_expander.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "llvm/ADT/DenseMap.h" #include "mlir/IR/Operation.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace { using ::testing::IsNull; using ::testing::NotNull; class DummyExpander : public SPMDExpanderBase { StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override { return errors::Unimplemented(""); } StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) override { return errors::Unimplemented(""); } StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) override { return errors::Unimplemented(""); } }; class SPMDExpanderRegistryTest : public ::testing::Test { public: SPMDExpanderRegistryTest() { registry_.RegisterPropagateFn(mlir::TF::AddOp::getOperationName().str(), std::make_unique<DummyExpander>()); } protected: SPMDExpanderRegistry registry_; }; TEST_F(SPMDExpanderRegistryTest, LookupFromOpName) { EXPECT_THAT(registry_.GetPropagateFnForFullOpName("tf.Add"), NotNull()); EXPECT_THAT(registry_.GetPropagateFnForFullOpName("Unknown"), IsNull()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/spmd_expander.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/spmd_expander_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6a7786ae-aaec-48bf-89fc-284799a17976
cpp
tensorflow/tensorflow
constant_folding
tensorflow/core/grappler/optimizers/constant_folding.cc
tensorflow/core/grappler/optimizers/constant_folding_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/grappler/optimizers/constant_folding.h" #include <cmath> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/grappler/clusters/cluster.h" #include "tensorflow/core/grappler/costs/graph_properties.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/op_types.h" #include "tensorflow/core/grappler/optimizers/evaluation_utils.h" #include "tensorflow/core/grappler/utils.h" #include "tensorflow/core/grappler/utils/symbolic_shapes.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/denormal.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/setround.h" #include "tensorflow/core/platform/tensor_coding.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/overflow.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" 
namespace tensorflow { namespace grappler { using TensorVector = absl::InlinedVector<TensorValue, 4UL>; const int64_t kMaxConstantSize = 100 * 1024; namespace { template <typename T> bool AllValuesAre(const TensorProto& proto, const T& value) { Tensor tensor; if (!tensor.FromProto(proto)) { return false; } auto values = tensor.flat<T>(); for (int i = 0; i < tensor.NumElements(); ++i) { if (values(i) != value) { return false; } } return true; } bool MaybeAddControlInput(const string& ctrl_input, NodeDef* node, GraphDef* graph, NodeMap* node_map) { bool already_exists = false; for (const string& input : node->input()) { if (input == ctrl_input || AsControlDependency(input) == ctrl_input) { already_exists = true; break; } } if (!already_exists) { const string ctrl_dep = ConstantFolding::AddControlDependency(ctrl_input, graph, node_map); node->add_input(ctrl_dep); node_map->AddOutput(NodeName(ctrl_input), node->name()); } return !already_exists; } bool MaybeRemoveControlInput(const string& old_input, NodeDef* node, GraphDef* graph, NodeMap* node_map) { bool removed_input = false; bool update_node_map = true; const string old_input_ctrl_dep = AsControlDependency(NodeName(old_input)); for (int i = 0; i < node->input_size(); ++i) { const string& input = node->input(i); if (old_input_ctrl_dep == input) { if (IsControlInput(input)) { node->mutable_input()->SwapElements(i, node->input_size() - 1); node->mutable_input()->RemoveLast(); removed_input = true; } else { update_node_map = false; } } } if (update_node_map) { node_map->RemoveOutput(NodeName(old_input), node->name()); } return removed_input; } bool HasTPUAttributes(const NodeDef& node) { AttrSlice attrs(node); for (const auto& attr : attrs) { if (attr.first.find("_tpu_") != attr.first.npos) { return true; } } return false; } template <typename T> bool PackedValuesNotEqual(T a, T b) { return a != b; } template <> bool PackedValuesNotEqual(float a, float b) { return reinterpret_cast<int32_t&>(a) != 
reinterpret_cast<int32_t&>(b); } template <> bool PackedValuesNotEqual(double a, double b) { return reinterpret_cast<int64_t&>(a) != reinterpret_cast<int64_t&>(b); } float QuantizedTypeMinAsFloat(DataType data_type) { switch (data_type) { case DT_QINT8: return Eigen::NumTraits<qint8>::lowest(); case DT_QUINT8: return Eigen::NumTraits<quint8>::lowest(); case DT_QINT16: return Eigen::NumTraits<qint16>::lowest(); case DT_QUINT16: return Eigen::NumTraits<quint16>::lowest(); case DT_QINT32: return Eigen::NumTraits<qint32>::lowest(); default: return 0.0f; } } float QuantizedTypeMaxAsFloat(DataType data_type) { switch (data_type) { case DT_QINT8: return Eigen::NumTraits<qint8>::highest(); case DT_QUINT8: return Eigen::NumTraits<quint8>::highest(); case DT_QINT16: return Eigen::NumTraits<qint16>::highest(); case DT_QUINT16: return Eigen::NumTraits<quint16>::highest(); case DT_QINT32: return Eigen::NumTraits<qint32>::highest(); default: return 0.0f; } } } ConstantFolding::ConstantFolding(RewriterConfig::Toggle opt_level, DeviceBase* cpu_device, bool disable_compressed_tensor_optimization, bool fold_quantization_emulation) : opt_level_(opt_level), cpu_device_(cpu_device), disable_compressed_tensor_optimization_( disable_compressed_tensor_optimization), fold_quantization_emulation_(fold_quantization_emulation) { resource_mgr_.reset(new ResourceMgr()); } ConstantFolding::ConstantFolding(DeviceBase* cpu_device, bool disable_compressed_tensor_optimization, bool fold_quantization_ops) : ConstantFolding(RewriterConfig::ON, cpu_device, disable_compressed_tensor_optimization, fold_quantization_ops) {} string ConstantFolding::AddControlDependency(const string& input_name, GraphDef* graph, NodeMap* node_map) { if (IsControlInput(input_name)) { return input_name; } const NodeDef* node = node_map->GetNode(input_name); if (!node) { return input_name; } if (!IsSwitch(*node)) { return AsControlDependency(*node); } else { for (const NodeDef* output : node_map->GetOutputs(node->name())) { if 
(IsIdentity(*output) || IsIdentityNSingleInput(*output)) { if (IsSameInput(output->name(), input_name)) { return AsControlDependency(*output); } } } int port = 0; string ctrl_dep_name = ParseNodeName(input_name, &port); strings::StrAppend(&ctrl_dep_name, "_", port); ctrl_dep_name = AddPrefixToNodeName(ctrl_dep_name, kConstantFoldingCtrl); const DataType output_type = node->attr().at("T").type(); NodeDef* added_node = node_map->GetNode(ctrl_dep_name); if (added_node == nullptr) { added_node = graph->add_node(); added_node->set_name(ctrl_dep_name); added_node->set_op("Identity"); added_node->set_device(node->device()); (*added_node->mutable_attr())["T"].set_type(output_type); *added_node->add_input() = input_name; node_map->AddNode(added_node->name(), added_node); node_map->AddOutput(node->name(), added_node->name()); } return AsControlDependency(*added_node); } } bool ConstantFolding::ForwardInputs(NodeDef* node, absl::Span<const int> inputs_to_forward) { for (int input_idx : inputs_to_forward) { if (input_idx < 0 || input_idx >= node->input_size()) { return false; } } const auto& tmp = node_map_->GetOutputs(node->name()); const std::vector<NodeDef*> consumers(tmp.begin(), tmp.end()); bool updated_graph = false; for (int input_idx : inputs_to_forward) { const string& input = node->input(input_idx); if (IsControlInput(input) && consumers.size() > 1) { continue; } const NodeDef* input_node = node_map_->GetNode(NodeName(input)); if (input_node == nullptr) { LOG(ERROR) << "Bad input: " << input; break; } for (NodeDef* consumer : consumers) { bool add_dep = false; for (int consumer_input_idx = 0; consumer_input_idx < consumer->input_size(); ++consumer_input_idx) { const string& consumer_input = consumer->input(consumer_input_idx); if (IsControlInput(consumer_input)) { break; } if (IsRetval(*consumer)) { break; } int output_idx; const string input_node_name = ParseNodeName(consumer_input, &output_idx); if (input_node_name == node->name() && output_idx == input_idx) { 
consumer->set_input(consumer_input_idx, input); node_map_->AddOutput(NodeName(input), consumer->name()); add_dep = true; } } if (add_dep) { consumer->add_input(AsControlDependency(node->name())); updated_graph = true; } } } if (updated_graph) { for (NodeDef* consumer : consumers) { DedupControlInputs(consumer); } } return updated_graph; } static Status PutValueIntoTensor(const int64_t value, const DataType& type, const int index, Tensor* tensor) { if (type == DT_INT32) { if (value >= INT_MAX) { return Status(absl::StatusCode::kInvalidArgument, "int32 overflow"); } tensor->flat<int32>()(index) = static_cast<int32>(value); } else { tensor->flat<int64_t>()(index) = value; } return absl::OkStatus(); } static Status ConvertShapeToConstant(const string& op, const DataType& type, const PartialTensorShape& shp, Tensor* tensor) { if (op == "Shape" || op == "ShapeN") { *tensor = Tensor(type, TensorShape({shp.dims()})); for (int i = 0; i < shp.dims(); ++i) { TF_RETURN_IF_ERROR(PutValueIntoTensor(shp.dim_size(i), type, i, tensor)); } } else if (op == "Size") { int64_t size = 1; for (int i = 0; i < shp.dims(); ++i) { size *= shp.dim_size(i); } *tensor = Tensor(type, TensorShape({})); TF_RETURN_IF_ERROR(PutValueIntoTensor(size, type, 0, tensor)); } else { CHECK_EQ(op, "Rank"); *tensor = Tensor(type, TensorShape({})); TF_RETURN_IF_ERROR(PutValueIntoTensor(shp.dims(), type, 0, tensor)); } return absl::OkStatus(); } bool ConstantFolding::OptimizedNodeExists(const NodeDef& node, StringPiece suffix) const { return node_map_->NodeExists(OptimizedNodeName(node, suffix)); } string ConstantFolding::OptimizedNodeName(const NodeDef& node, StringPiece suffix) const { return AddPrefixToNodeName(strings::StrCat(node.name(), suffix), kConstantFoldingConst); } bool ConstantFolding::IsReallyConstant(const NodeDef& node) const { if (!IsConstant(node)) { return false; } return feed_nodes_.find(node.name()) == feed_nodes_.end(); } bool ConstantFolding::GetTensorFromConstNode(const string& 
node_name_or_input, Tensor* tensor) { const NodeDef* node = node_map_->GetNode(node_name_or_input); return node != nullptr && IsReallyConstant(*node) && CheckAttrExists(*node, "value").ok() && tensor->FromProto(node->attr().at("value").tensor()); } Status ConstantFolding::MaterializeShapes(const GraphProperties& properties) { const int node_count = graph_->node_size(); for (int node_idx = 0; node_idx < node_count; ++node_idx) { NodeDef* node = graph_->mutable_node(node_idx); const string op = node->op(); if (op != "Shape" && op != "Size" && op != "Rank" && op != "ShapeN" && op != "TensorArraySizeV3") { continue; } const std::vector<OpInfo::TensorProperties>& output = properties.GetOutputProperties(node->name()); const std::vector<OpInfo::TensorProperties>& input = properties.GetInputProperties(node->name()); if (input.empty() || output.empty()) { continue; } if (op == "Shape" || op == "Size" || op == "Rank") { CHECK_EQ(1, output.size()); CHECK_EQ(1, input.size()); const DataType type = output[0].dtype(); CHECK(type == DT_INT32 || type == DT_INT64); const PartialTensorShape shape(input[0].shape()); if ((op != "Rank" && !shape.IsFullyDefined()) || (op == "Rank" && shape.unknown_rank())) { continue; } Tensor constant_value(type); if (!ConvertShapeToConstant(op, type, shape, &constant_value).ok()) { continue; } if (op == "Shape") { if (shape.dims() > 0 && shape.dim_size(0) == 0) continue; } graph_modified_ = true; node->set_op("Const"); EraseRegularNodeAttributes(node); (*node->mutable_attr())["dtype"].set_type(type); constant_value.AsProtoTensorContent( (*node->mutable_attr())["value"].mutable_tensor()); string ctrl_dep = AddControlDependency(node->input(0), graph_, node_map_.get()); node_map_->UpdateInput(node->name(), node->input(0), ctrl_dep); node->set_input(0, ctrl_dep); continue; } if (op == "TensorArraySizeV3") { const NodeDef* array = CHECK_NOTNULL(node_map_->GetNode(node->input(0))); if (array->input_size() == 0 || (array->attr().count("dynamic_size") != 0 && 
array->attr().at("dynamic_size").b())) { continue; } const NodeDef* array_size = CHECK_NOTNULL(node_map_->GetNode(array->input(0))); if (IsReallyConstant(*array_size)) { if (array_size->attr().count("value") == 0) { continue; } const TensorProto& raw_val = array_size->attr().at("value").tensor(); if (raw_val.dtype() != DT_INT32) { continue; } Tensor value(raw_val.dtype(), raw_val.tensor_shape()); if (!value.FromProto(raw_val)) { continue; } if (value.flat<int32>()(0) == 0) { continue; } graph_modified_ = true; node->set_op("Const"); *node->mutable_attr() = array_size->attr(); node->set_input(0, AsControlDependency(NodeName(node->input(0)))); node->set_input(1, AddControlDependency(NodeName(node->input(1)), graph_, node_map_.get())); } continue; } CHECK_EQ(op, "ShapeN"); CHECK_EQ(input.size(), output.size()); const NodeDef* const shape_n_node = node; for (int port_idx = 0, idx_limit = output.size(); port_idx < idx_limit; ++port_idx) { const DataType type = output[port_idx].dtype(); CHECK(type == DT_INT32 || type == DT_INT64); const PartialTensorShape shape(input[port_idx].shape()); if (!shape.IsFullyDefined()) { continue; } Tensor constant_value(type); auto status = ConvertShapeToConstant(op, type, shape, &constant_value); if (!status.ok()) { continue; } auto fanouts = node_map_->GetOutputs(shape_n_node->name()); for (NodeDef* output : fanouts) { bool direct_edges_exist = false; for (int k = 0; k < output->input_size(); ++k) { int port; const string node_name = ParseNodeName(output->input(k), &port); if (node_name == shape_n_node->name() && port == port_idx) { const string const_name = OptimizedNodeName( *shape_n_node, strings::StrCat("-matshapes-", port_idx)); if (node_map_->GetNode(const_name) == nullptr) { NodeDef* added_node = graph_->add_node(); added_node->set_name(const_name); added_node->set_op("Const"); added_node->set_device(shape_n_node->device()); node_map_->AddNode(added_node->name(), added_node); (*added_node->mutable_attr())["dtype"].set_type(type); 
constant_value.AsProtoTensorContent( (*added_node->mutable_attr())["value"].mutable_tensor()); string ctrl_dep = AddControlDependency(shape_n_node->name(), graph_, node_map_.get()); *added_node->add_input() = ctrl_dep; node_map_->AddOutput(NodeName(ctrl_dep), added_node->name()); } *output->mutable_input(k) = const_name; node_map_->AddOutput(const_name, output->name()); graph_modified_ = true; } if (node_name == shape_n_node->name() && port != port_idx) { direct_edges_exist = true; } } if (!direct_edges_exist) { node_map_->RemoveOutput(node->name(), output->name()); } } } } return absl::OkStatus(); } namespace { bool ExtractShape(const NodeDef& shape_node, const GraphProperties& properties, BCast::Vec* shape, int64_t* min_id) { if (shape_node.op() == "Shape") { const std::vector<OpInfo::TensorProperties>& prop1 = properties.GetInputProperties(shape_node.name()); if (prop1.size() != 1) { return false; } const TensorShapeProto& shp = prop1[0].shape(); if (shp.unknown_rank()) { return false; } for (const auto& dim : shp.dim()) { shape->push_back(dim.size()); *min_id = std::min<int64_t>(*min_id, dim.size()); } } else { if (shape_node.attr().count("value") == 0) { return false; } const TensorProto& raw_val = shape_node.attr().at("value").tensor(); if (raw_val.dtype() != DT_INT64 && raw_val.dtype() != DT_INT32) { return false; } Tensor value(raw_val.dtype(), raw_val.tensor_shape()); if (!value.FromProto(raw_val)) { return false; } for (int j = 0; j < value.NumElements(); ++j) { if (raw_val.dtype() == DT_INT64) { shape->push_back(value.vec<int64_t>()(j)); } else { shape->push_back(value.vec<int>()(j)); } } } return true; } } Status ConstantFolding::MaterializeBroadcastGradientArgs( const NodeDef& node, const GraphProperties& properties) { const NodeDef* shape_node1 = node_map_->GetNode(node.input(0)); const NodeDef* shape_node2 = node_map_->GetNode(node.input(1)); if (shape_node1 == nullptr || (shape_node1->op() != "Shape" && !IsReallyConstant(*shape_node1)) || 
shape_node2 == nullptr || (shape_node2->op() != "Shape" && !IsReallyConstant(*shape_node2))) { return absl::OkStatus(); } if (OptimizedNodeExists(node, "-folded-1") || OptimizedNodeExists(node, "-folded-2")) { return absl::OkStatus(); } int64_t min_id = 0; BCast::Vec shape1; if (!ExtractShape(*shape_node1, properties, &shape1, &min_id)) { return absl::OkStatus(); } BCast::Vec shape2; if (!ExtractShape(*shape_node2, properties, &shape2, &min_id)) { return absl::OkStatus(); } for (auto& id : shape1) { if (id == -1) { id = --min_id; } } for (auto& id : shape2) { if (id == -1) { id = --min_id; } } const int common_dims = std::min(shape1.size(), shape2.size()); for (int i = 0; i < common_dims; ++i) { if (shape1[i] >= 0 && shape2[i] >= 0) { continue; } if (shape1[i] != shape2[i]) { return absl::OkStatus(); } } for (int i = common_dims, end = shape1.size(); i < end; ++i) { if (shape1[i] < 0) { return absl::OkStatus(); } } for (int i = common_dims, end = shape2.size(); i < end; ++i) { if (shape2[i] < 0) { return absl::OkStatus(); } } BCast bcast(shape1, shape2); if (!bcast.IsValid()) { return absl::OkStatus(); } BCast::Vec reduce_dims[2]; reduce_dims[0] = bcast.grad_x_reduce_idx(); reduce_dims[1] = bcast.grad_y_reduce_idx(); TF_RETURN_IF_ERROR(CheckAttrExists(node, "T")); const DataType type = node.attr().at("T").type(); NodeDef* out[2]; for (int j = 0; j < 2; ++j) { int reduction_indices = reduce_dims[j].size(); Tensor value(type, TensorShape({reduction_indices})); for (int i = 0; i < reduction_indices; ++i) { if (type == DT_INT32) { value.vec<int32>()(i) = reduce_dims[j][i]; } else { value.vec<int64_t>()(i) = reduce_dims[j][i]; } } string const_name = OptimizedNodeName(node, strings::StrCat("-bcastargs-", j)); out[j] = node_map_->GetNode(const_name); if (out[j] == nullptr) { out[j] = graph_->add_node(); TF_RETURN_IF_ERROR( CreateNodeDef(const_name, TensorValue(&value), out[j])); out[j]->set_device(node.device()); node_map_->AddNode(const_name, out[j]); string ctrl_dep = 
// --- Tail of ConstantFolding::MaterializeBroadcastGradientArgs (definition
// begins above this chunk). Wires the materialized constants out[0]/out[1]
// into every data consumer of the original node's ports 0 and 1. ---
AddControlDependency(node.name(), graph_, node_map_.get()); *out[j]->add_input() = ctrl_dep;
node_map_->AddOutput(NodeName(ctrl_dep), const_name); } }
const auto outputs = node_map_->GetOutputs(node.name());
for (NodeDef* output : outputs) { for (int k = 0; k < output->input_size(); ++k) { int port;
string node_name = ParseNodeName(output->input(k), &port);
if (node_name == node.name() && port >= 0 && port < 2 && out[port]) {
*output->mutable_input(k) = out[port]->name();
node_map_->UpdateInput(output->name(), node_name, out[port]->name()); } } }
return absl::OkStatus(); }
// Replaces a non-constant reduction-indices input (input 1) of a reduction
// node with a materialized constant [0, 1, ..., input_rank-1], but only when
// shape inference proves the reduction is a full reduction: either the output
// is rank 0 / the indices cover every input dimension, or every fanout is a
// Reshape whose single output has exactly one element. A control dependency
// on the original indices producer preserves execution ordering.
Status ConstantFolding::MaterializeReductionIndices( NodeDef* node, const GraphProperties& properties) {
if (node->input_size() < 2) { return absl::OkStatus(); }
const NodeDef* indices = node_map_->GetNode(node->input(1));
if (!indices || IsReallyConstant(*indices)) { return absl::OkStatus(); }
const std::vector<OpInfo::TensorProperties>& input_props = properties.GetInputProperties(node->name());
if (input_props.size() != 2) { return absl::OkStatus(); }
const OpInfo::TensorProperties& input_prop = input_props[0];
if (input_prop.shape().unknown_rank()) { return absl::OkStatus(); }
const int input_rank = input_prop.shape().dim_size();
if (input_rank < 1) { return absl::OkStatus(); }
const OpInfo::TensorProperties& reduction_indices_prop = input_props[1];
DataType dtype = reduction_indices_prop.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) { return absl::OkStatus(); }
PartialTensorShape reduction_indices_shape(reduction_indices_prop.shape());
const int num_reduction_indices = reduction_indices_shape.num_elements();
const std::vector<OpInfo::TensorProperties>& output_props = properties.GetOutputProperties(node->name());
if (output_props.size() != 1) { return absl::OkStatus(); }
const OpInfo::TensorProperties& output_prop = output_props[0];
const int output_rank = output_prop.shape().unknown_rank() ? -1 : output_prop.shape().dim_size();
bool full_reduction = output_rank == 0 || num_reduction_indices == input_rank;
if (!full_reduction) {
// Not provably full from shapes alone: accept only if every consumer is a
// Reshape collapsing the result to a single element.
for (const NodeDef* fanout : node_map_->GetOutputs(node->name())) { full_reduction = false;
if (!IsReshape(*fanout)) { return absl::OkStatus(); }
const std::vector<OpInfo::TensorProperties>& reshape_props = properties.GetOutputProperties(fanout->name());
if (reshape_props.size() != 1) { return absl::OkStatus(); }
const OpInfo::TensorProperties& reshape_prop = reshape_props[0];
PartialTensorShape shape(reshape_prop.shape());
if (shape.num_elements() != 1) { return absl::OkStatus(); } else { full_reduction = true; } }
if (!full_reduction) { return absl::OkStatus(); } }
string const_name = OptimizedNodeName(*node, "-reduction_indices");
if (node_map_->GetNode(const_name)) { return absl::OkStatus(); }
NodeDef* reduction_indices = graph_->add_node();
Tensor value(dtype, TensorShape({input_rank}));
for (int i = 0; i < input_rank; ++i) { if (dtype == DT_INT32) { value.vec<int32>()(i) = i; } else { value.vec<int64_t>()(i) = i; } }
TF_RETURN_IF_ERROR( CreateNodeDef(const_name, TensorValue(&value), reduction_indices));
reduction_indices->set_device(node->device());
string ctrl_dep = AddControlDependency(node->input(1), graph_, node_map_.get());
*reduction_indices->add_input() = ctrl_dep;
node_map_->AddNode(const_name, reduction_indices);
node_map_->AddOutput(NodeName(ctrl_dep), const_name);
node->set_input(1, reduction_indices->name());
node_map_->UpdateInput(node->name(), indices->name(), reduction_indices->name());
return absl::OkStatus(); }
// Rewrites constant-valued nodes into Const nodes when the output shape is
// fully defined: Fill (only when both inputs are constants) copies the fill
// value's tensor proto; ZerosLike/OnesLike fall through to
// ReplaceOperationWithConstant with value 0.0 / 1.0. Original data inputs are
// demoted to control dependencies to preserve execution order.
Status ConstantFolding::MaterializeConstantValuedNode( NodeDef* node, const GraphProperties& properties) {
if (disable_compressed_tensor_optimization_) { return absl::OkStatus(); }
const std::vector<OpInfo::TensorProperties>& output_props = properties.GetOutputProperties(node->name());
if (output_props.size() != 1) return absl::OkStatus();
const auto& output_shape = output_props[0].shape();
if (!PartialTensorShape(output_shape).IsFullyDefined()) { return absl::OkStatus(); }
if (IsFill(*node)) { const auto output_dtype = output_props[0].dtype();
NodeDef* input_node = nullptr;
for (int i = 0; i < 2; ++i) { input_node = node_map_->GetNode(NodeName(node->input(i)));
if (input_node == nullptr || !IsReallyConstant(*input_node)) { return absl::OkStatus(); } }
TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value"));
TensorProto* tensor = (*node->mutable_attr())["value"].mutable_tensor();
const TensorProto& input_tensor = input_node->attr().at("value").tensor();
if (!input_tensor.tensor_content().empty()) { Tensor t;
// NOTE(review): "form TensorProto" in the message below looks like a typo
// for "from TensorProto"; it is a runtime string, left unchanged here.
if (!t.FromProto(input_tensor)) { return absl::InvalidArgumentError(absl::StrCat( "Could not construct Tensor form TensorProto in node: ", input_node->name())); }
tensor->clear_tensor_content(); t.AsProtoField(tensor); } else { *tensor = input_tensor; }
*(tensor->mutable_tensor_shape()) = output_shape;
(*node->mutable_attr())["dtype"].set_type(output_dtype);
node->mutable_attr()->erase("T");
node->mutable_attr()->erase("index_type");
node->set_op("Const");
for (int i = 0; i < 2; i++) { const string ctrl_dep = AsControlDependency(node->input(i));
node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep);
node->set_input(i, ctrl_dep); }
graph_modified_ = true; } else {
double value = (IsZerosLike(*node) ? 0.0 : (IsOnesLike(*node) ? 1.0 : -1.0));
if (value >= 0) { TF_RETURN_IF_ERROR(ReplaceOperationWithConstant( value, properties, output_shape, node, graph_)); } }
return absl::OkStatus(); }
// Replaces a node whose single output value was inferred by shape/value
// inference with a Const holding that value (when the node is foldable).
Status ConstantFolding::MaterializeOutputValues( NodeDef* node, const GraphProperties& properties) {
const std::vector<OpInfo::TensorProperties>& output = properties.GetOutputProperties(node->name());
if (output.size() != 1 || !output[0].has_value() || !IsFoldable(*node, &properties)) { return absl::OkStatus(); }
if (IsIdentity(*node)) { NodeDef* input = node_map_->GetNode(node->input(0));
if (IsReallyConstant(*input)) { std::vector<int> inputs_to_forward;
// NOTE(review): inputs_to_forward is empty here, so std::iota over
// begin()/end() writes nothing — presumably a resize is missing upstream;
// confirm against ForwardInputs' expectations before changing.
std::iota(inputs_to_forward.begin(), inputs_to_forward.end(), 0);
graph_modified_ = ForwardInputs(node, inputs_to_forward);
return absl::OkStatus(); } }
TensorProto value_copy = output[0].value();
return ReplaceOperationWithConstantTensor(output[0].dtype(), &value_copy, node, graph_); }
// Driver for the materialization passes above: dispatches each node to the
// appropriate Materialize* routine based on its op.
Status ConstantFolding::MaterializeConstants( const GraphProperties& properties) {
const int node_count = graph_->node_size();
for (int i = 0; i < node_count; ++i) { NodeDef& node = *graph_->mutable_node(i);
const string& op = node.op();
if (op == "BroadcastGradientArgs") { TF_RETURN_IF_ERROR(MaterializeBroadcastGradientArgs(node, properties));
} else if (IsReduction(node)) { TF_RETURN_IF_ERROR(MaterializeReductionIndices(&node, properties));
} else if (IsFill(node) || IsZerosLike(node) || IsOnesLike(node)) { TF_RETURN_IF_ERROR(MaterializeConstantValuedNode(&node, properties));
} else { TF_RETURN_IF_ERROR(MaterializeOutputValues(&node, properties)); } }
return absl::OkStatus(); }
// Memoized foldability check: caches the op-invariant MaybeFoldable() verdict
// per "name/op" key, then re-runs the input-dependent IsFoldableUncached().
bool ConstantFolding::IsFoldable(const NodeDef& node, const GraphProperties* properties) {
string key = strings::StrCat(node.name(), "/", node.op());
auto it = maybe_foldable_nodes_.find(key);
if (it == maybe_foldable_nodes_.end()) { it = maybe_foldable_nodes_ .emplace(std::move(key), MaybeFoldable(node, properties)) .first; }
if (!it->second) { return false; } else { return
IsFoldableUncached(node, properties); } }
// Input-dependent foldability check (not cached): all data inputs must be
// constants (for Merge, at least one constant input without control inputs
// suffices), and the folded output must not be larger than both the inputs
// and kMaxConstantSize.
bool ConstantFolding::IsFoldableUncached( const NodeDef& node, const GraphProperties* properties) const {
if (node.input().empty()) { return false; }
bool merge_has_constant_input = false;
const bool is_merge = IsMerge(node);
for (const auto& input : node.input()) { if (IsControlInput(input)) { continue; }
const NodeDef* input_node = node_map_->GetNode(input);
if (!input_node) { return false; }
bool is_const = IsReallyConstant(*input_node);
if (is_const) {
// String constants and constants lacking a dtype attr are not folded.
if (input_node->attr().count("dtype") == 0 || input_node->attr().at("dtype").type() == DT_STRING) { return false; }
merge_has_constant_input |= !HasControlInputs(*input_node);
} else if (!is_merge) { return false; } }
if (is_merge && !merge_has_constant_input) return false;
if (disable_compressed_tensor_optimization_ && (IsFill(node) || IsZerosLike(node) || IsOnesLike(node))) return false;
if (properties != nullptr && properties->HasOutputProperties(node.name())) {
const std::vector<OpInfo::TensorProperties>& input_props = properties->GetInputProperties(node.name());
const std::vector<OpInfo::TensorProperties>& output_props = properties->GetOutputProperties(node.name());
// Total byte size of fully-defined inputs, saturating at INT64_MAX.
int64_t input_size_bytes = 0;
for (const auto& input_prop : input_props) { const PartialTensorShape input_shape(input_prop.shape());
if (input_shape.IsFullyDefined()) { int64_t bytes = MultiplyWithoutOverflow( input_shape.num_elements(), DataTypeSize(input_prop.dtype()));
input_size_bytes = AddWithoutOverflow(input_size_bytes, bytes); } }
if (input_size_bytes < 0) { input_size_bytes = INT64_MAX; }
for (const auto& output_prop : output_props) { PartialTensorShape output_shape;
if (!PartialTensorShape::BuildPartialTensorShape(output_prop.shape(), &output_shape) .ok()) { return false; }
if (output_shape.IsFullyDefined()) { const int64_t num_bytes = MultiplyWithoutOverflow( output_shape.num_elements(), DataTypeSize(output_prop.dtype()));
if (num_bytes < 0) { return false; }
if (num_bytes > input_size_bytes && num_bytes > kMaxConstantSize) { return false; } } } }
return true; }
// Op-invariant foldability check: rejects constants themselves, side-effecting
// ops, preserved nodes, frame-modifying ops, placeholders, fake params,
// save/restore/reader/quantized/sparse ops, TPU-annotated nodes, ops with no
// or variant outputs, and nodes with no consumers (unless allowlisted).
bool ConstantFolding::MaybeFoldable(const NodeDef& node, const GraphProperties* properties) const {
if (IsConstant(node)) { return false; }
if (!IsFreeOfSideEffect(node)) { return false; }
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end() && nodes_allowlist_.find(node.name()) == nodes_allowlist_.end()) { return false; }
if (ModifiesFrameInfo(node)) { return false; }
if (IsPlaceholder(node)) { return false; }
if (IsFakeParam(node)) { return false; }
if (node.op() == "AccumulateNV2") { return false; }
if (node.op() == "LoopCond") { return false; }
if (!fold_quantization_emulation_ && IsQuantizationEmulation(node)) { return false; }
const string& op = node.op();
if (op.find("Save") != string::npos || op.find("Restore") != string::npos || op.find("Reader") != string::npos) { return false; }
if (op.find("Quantized") != string::npos || absl::StartsWith(op, "Sparse")) { return false; }
if (HasTPUAttributes(node)) { return false; }
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok()) { return false; }
if (op_def->output_arg_size() == 0) { return false; }
for (const OpDef::ArgDef& output_arg : op_def->output_arg()) { if (output_arg.type() == DT_VARIANT) { return false; } }
const auto& outputs = node_map_->GetOutputs(node.name());
if (outputs.empty() && nodes_allowlist_.find(node.name()) == nodes_allowlist_.end()) { return false; }
return true; }
namespace {
// Appends `value` cast to TYPE into the TensorProto repeated field NAME.
#define SET_TENSOR_VAL_CASE(DTYPE, TYPE, NAME) \
  case DTYPE:                                  \
    t->add_##NAME##_val(static_cast<TYPE>(value)); \
    break;
// Builds an AttrValue holding a TensorProto of `type`/`shape` with a single
// splatted scalar `value`; errors on unsupported dtypes.
Status CreateConstantTensorAttrValue(DataType type, double value, const TensorShapeProto& shape, AttrValue* attr_tensor) {
TensorProto* t = attr_tensor->mutable_tensor();
t->set_dtype(type);
*t->mutable_tensor_shape() = shape;
switch (type) {
case DT_HALF: t->add_half_val( Eigen::numext::bit_cast<uint16>(static_cast<Eigen::half>(value))); break;
case DT_BFLOAT16: t->add_half_val( Eigen::numext::bit_cast<uint16>(static_cast<bfloat16>(value))); break;
SET_TENSOR_VAL_CASE(DT_FLOAT, float, float);
SET_TENSOR_VAL_CASE(DT_DOUBLE, double, double);
SET_TENSOR_VAL_CASE(DT_INT64, int64_t, int64);
SET_TENSOR_VAL_CASE(DT_UINT64, int64_t, int64);
SET_TENSOR_VAL_CASE(DT_INT32, int32, int);
SET_TENSOR_VAL_CASE(DT_UINT32, int32, int);
SET_TENSOR_VAL_CASE(DT_INT16, int32, int);
SET_TENSOR_VAL_CASE(DT_UINT16, int32, int);
SET_TENSOR_VAL_CASE(DT_INT8, int32, int);
SET_TENSOR_VAL_CASE(DT_UINT8, int32, int);
SET_TENSOR_VAL_CASE(DT_QINT32, int32, int);
SET_TENSOR_VAL_CASE(DT_QINT16, int32, int);
SET_TENSOR_VAL_CASE(DT_QUINT16, int32, int);
SET_TENSOR_VAL_CASE(DT_QINT8, int32, int);
SET_TENSOR_VAL_CASE(DT_QUINT8, int32, int);
SET_TENSOR_VAL_CASE(DT_BOOL, bool, bool);
default: return absl::InvalidArgumentError( absl::StrCat("Unsupported type in CreateConstantTensorAttrValue: ", DataTypeString(type))); }
return absl::OkStatus(); }
// NOTE(review): the name below ("CAL") does not match the defined macro
// ("VAL"); the #undef is a harmless no-op but SET_TENSOR_VAL_CASE leaks past
// this point — confirm intent before fixing.
#undef SET_TENSOR_CAL_CASE
// Derives the node's element dtype from its "T"/"dtype" attrs, logical-op
// special cases, or (as a fallback) inferred output properties.
DataType GetDataTypeFromNodeOrProps(const NodeDef& node, const GraphProperties& properties) {
DataType dtype = DT_INVALID;
if (node.attr().count("T") == 1) { dtype = node.attr().at("T").type();
} else if (node.attr().count("dtype") == 1) { dtype = node.attr().at("dtype").type();
} else if (IsLogicalOr(node) || IsLogicalAnd(node)) { dtype = DT_BOOL;
} else { auto output_props = properties.GetOutputProperties(node.name());
if (!output_props.empty()) { dtype = output_props[0].dtype(); } }
return dtype; }
// True if a Mul's constant input shape is safe to push below a convolution:
// scalars always are; for NHWC/NDHWC the const must broadcast to the filter
// shape with all leading dims of size 1.
bool IsValidConstShapeForMulConvPushDown( const string& data_format, const TensorShapeProto& filter_shape, const TensorShapeProto& mul_const_input_shape) {
if (mul_const_input_shape.dim_size() <= static_cast<int>(data_format.size()) && TensorShape(mul_const_input_shape).num_elements() == 1) { return true; }
if (data_format == "NHWC" || data_format == "NDHWC") { TensorShapeProto new_filter_shape;
if (!ShapeAfterBroadcast(filter_shape, mul_const_input_shape, &new_filter_shape)) { return
false; }
if (!ShapesSymbolicallyEqual(filter_shape, new_filter_shape)) { return false; }
for (int i = 0; i < mul_const_input_shape.dim_size() - 1; ++i) { if (mul_const_input_shape.dim(i).size() > 1) return false; }
return true;
} else if (data_format == "NCHW" || data_format == "NCDHW") {
// NCHW/NCDHW push-down is not supported.
return false; }
return false; }
}
// Builds a Const NodeDef named `name` holding `tensor`. For tensors with more
// than 4 elements it attempts a run-length style compression: if all trailing
// elements repeat the value at last_index, only the leading prefix is stored
// in the typed repeated field. Errors when the encoding would exceed both
// `original_size` and kMaxConstantSize.
Status ConstantFolding::CreateNodeDef(const string& name, const TensorValue& tensor, NodeDef* node, size_t original_size) {
node->set_name(name);
node->set_op("Const");
AttrValue attr_type;
attr_type.set_type(tensor->dtype());
node->mutable_attr()->insert({"dtype", attr_type});
AttrValue attr_tensor;
TensorProto* t = attr_tensor.mutable_tensor();
bool optimized = false;
size_t encoded_size;
if (tensor->NumElements() > 4) {
// Finds the last index where the value changes; everything after it is a
// repeat and can be dropped from the proto (protobuf repeats the last
// element implicitly on decode).
#define POPULATE_TENSOR_PROTO(tensor, t, TYPE, FIELDTYPE)  \
  {                                                        \
    const auto* val_ptr = tensor->flat<TYPE>().data();     \
    auto last = *val_ptr;                                  \
    int64_t last_index = 0;                                \
    for (int64_t i = 0; i < tensor->NumElements(); ++i) {  \
      TYPE cur = *val_ptr++;                               \
      if (PackedValuesNotEqual(cur, last)) {               \
        last = cur;                                        \
        last_index = i;                                    \
      }                                                    \
    }                                                      \
    encoded_size = (last_index + 1) * sizeof(FIELDTYPE);   \
    if (encoded_size < kint32max) {                        \
      optimized = true;                                    \
      t->mutable_##FIELDTYPE##_val()->Reserve(last_index + 1); \
      const auto* src_ptr = tensor->flat<TYPE>().data();   \
      auto* dst_ptr = t->mutable_##FIELDTYPE##_val()       \
                          -> AddNAlreadyReserved(last_index + 1); \
      std::copy(src_ptr, src_ptr + last_index + 1, dst_ptr); \
    }                                                      \
  }                                                        \
  break
switch (tensor->dtype()) {
case DT_FLOAT: POPULATE_TENSOR_PROTO(tensor, t, float, float);
case DT_DOUBLE: POPULATE_TENSOR_PROTO(tensor, t, double, double);
case DT_INT64: POPULATE_TENSOR_PROTO(tensor, t, int64_t, int64);
case DT_UINT64: POPULATE_TENSOR_PROTO(tensor, t, uint64, uint64);
case DT_INT32: POPULATE_TENSOR_PROTO(tensor, t, int32_t, int);
case DT_UINT32: POPULATE_TENSOR_PROTO(tensor, t, uint32, uint32);
case DT_INT16: POPULATE_TENSOR_PROTO(tensor, t, int16_t, int);
case DT_UINT16: POPULATE_TENSOR_PROTO(tensor, t, uint16, int);
case DT_INT8: POPULATE_TENSOR_PROTO(tensor, t, int8_t, int);
case DT_UINT8: POPULATE_TENSOR_PROTO(tensor, t, uint8, int);
case DT_BOOL: POPULATE_TENSOR_PROTO(tensor, t, bool, bool);
default: break; } }
if (optimized) { t->set_dtype(tensor->dtype());
tensor->shape().AsProto(t->mutable_tensor_shape());
} else {
// Fallback: dense tensor_content encoding.
tensor->AsProtoTensorContent(t);
encoded_size = t->tensor_content().size(); }
node->mutable_attr()->insert({"value", attr_tensor});
if (encoded_size > original_size && encoded_size >= kMaxConstantSize) {
return absl::InvalidArgumentError( absl::StrCat("Can't fold ", name, ", its size would be too large (", encoded_size, " >= ", kMaxConstantSize, " bytes)")); }
return absl::OkStatus(); }
// Thin wrapper evaluating `node` on the host CPU device with the optimizer's
// resource manager.
Status ConstantFolding::EvaluateNode(const NodeDef& node, const TensorVector& inputs, TensorVector* output) const {
return ::tensorflow::grappler::EvaluateNode(node, inputs, cpu_device_, resource_mgr_.get(), output); }
// Evaluates one foldable node: gathers its constant input tensors, runs the
// op, and converts each output tensor into a Const NodeDef (named with a
// "-folded" suffix, indexed when there are multiple outputs). Sets
// *result_too_large when CreateNodeDef rejects an oversized constant.
Status ConstantFolding::EvaluateOneFoldable(const NodeDef& node, std::vector<NodeDef>* outputs, bool* result_too_large) {
TensorVector inputs;
TensorVector output_tensors;
auto inputs_cleanup = gtl::MakeCleanup([&inputs, &output_tensors] {
for (const auto& input : inputs) { delete input.tensor; }
for (const auto& output : output_tensors) { if (output.tensor) { delete output.tensor; } } });
size_t total_inputs_size = 0;
for (const auto& input : node.input()) { const TensorId input_tensor = ParseTensorName(input);
if (input_tensor.index() < 0) {
// Control inputs follow all data inputs; stop here.
break; }
const NodeDef* input_node = node_map_->GetNode(input);
if (!IsReallyConstant(*input_node)) {
return Status(absl::StatusCode::kInvalidArgument, strings::StrCat("Can't fold ", node.name(), ", its ", input, " isn't constant")); }
TF_RETURN_IF_ERROR(CheckAttrExists(*input_node, "value"));
const TensorProto& raw_val = input_node->attr().at("value").tensor();
if (raw_val.dtype() == DT_INVALID) {
return Status( absl::StatusCode::kInvalidArgument, strings::StrCat("A tensor in the input node, with TensorId of ", input_tensor.ToString(), " has a dtype of DT_INVALID.")); }
if (IsRefType(raw_val.dtype())) {
return absl::InvalidArgumentError(absl::StrCat( "Not allowed to construct a tensor with reference dtype, got ", DataTypeString(raw_val.dtype()))); }
Tensor* value = new Tensor(raw_val.dtype(), raw_val.tensor_shape());
if (!value->FromProto(raw_val)) { delete (value);
return absl::InvalidArgumentError( absl::StrCat("Unable to make Tensor from proto for ", node.name(), " with shape ", raw_val.tensor_shape().DebugString())); }
inputs.emplace_back(value);
total_inputs_size += value->TotalBytes(); }
TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, &output_tensors));
if (output_tensors.empty()) { return Status(absl::StatusCode::kInvalidArgument, "Expected at least one output."); }
outputs->resize(output_tensors.size());
for (size_t i = 0; i < output_tensors.size(); i++) {
string node_name = OptimizedNodeName(node, "-folded");
if (output_tensors.size() > 1) { node_name = strings::StrCat(node_name, "-", i); }
if (output_tensors[i].tensor) {
Status s = CreateNodeDef(node_name, output_tensors[i], &outputs->at(i), total_inputs_size);
if (!s.ok()) { *result_too_large = true; return s; }
} else {
// Dead output: leave an empty NodeDef as a placeholder.
outputs->at(i) = NodeDef(); } }
return absl::OkStatus(); }
// Folds a Merge node whose first constant input (with no control fanins) is
// known: materializes copies of that constant and of its index as new nodes
// (gated on the Merge via control deps), and rewires consumers of the Merge's
// port 0 / port 1 to them.
Status ConstantFolding::FoldMergeNode(NodeDef* node, GraphDef* output_graph) {
for (int input_index = 0; input_index < node->input_size(); ++input_index) {
const auto& input = node->input(input_index);
if (IsControlInput(input)) { continue; }
NodeDef* input_node = node_map_->GetNode(input);
if (!IsReallyConstant(*input_node)) { continue; }
bool valid_input = true;
for (const string& fanin_of_input : input_node->input()) { if (IsControlInput(fanin_of_input)) { valid_input = false; break; } }
if (!valid_input) { continue; }
string const_out_name = OptimizedNodeName(*node, "_const");
string const_index_name = OptimizedNodeName(*node, "_index");
if (node_map_->GetNode(const_out_name) || node_map_->GetNode(const_index_name)) {
return absl::AlreadyExistsError( strings::StrCat(const_out_name, " or ", const_index_name, " already present in the graph")); }
NodeDef* const_out = output_graph->add_node();
*const_out = *input_node;
const_out->set_name(const_out_name);
const_out->set_device(node->device());
*const_out->add_input() = AsControlDependency(*node);
node_map_->AddNode(const_out->name(), const_out);
node_map_->AddOutput(node->name(), const_out->name());
NodeDef* const_index = output_graph->add_node();
const_index->set_op("Const");
Tensor index(DT_INT32, TensorShape({}));
index.flat<int32>()(0) = input_index;
(*const_index->mutable_attr())["dtype"].set_type(DT_INT32);
index.AsProtoTensorContent( (*const_index->mutable_attr())["value"].mutable_tensor());
const_index->set_name(const_index_name);
const_index->set_device(node->device());
*const_index->add_input() = AsControlDependency(*node);
node_map_->AddNode(const_index->name(), const_index);
node_map_->AddOutput(node->name(), const_index->name());
auto outputs = node_map_->GetOutputs(node->name());
for (NodeDef* output : outputs) { for (int i = 0; i < output->input_size(); i++) { int port;
string node_name = ParseNodeName(output->input(i), &port);
if (node_name == node->name()) {
if (port == 0) { *output->mutable_input(i) = const_out->name();
node_map_->AddOutput(const_out->name(), output->name());
} else if (port == 1) { *output->mutable_input(i) = const_index->name();
node_map_->AddOutput(const_index->name(), output->name());
} else { } } } }
return absl::OkStatus(); }
return absl::OkStatus(); }
// Folds a single node: Merge nodes are delegated to FoldMergeNode; otherwise
// the node is evaluated and either rewritten in place into a Const (single
// output) or replaced by newly added Const nodes, with consumers rewired
// port-by-port. *result_too_large is set when the folded constant is rejected
// for size.
Status ConstantFolding::FoldNode(NodeDef* node, GraphDef* output_graph, bool* result_too_large) {
*result_too_large = false;
if (IsMerge(*node)) { return FoldMergeNode(node, output_graph); }
std::vector<NodeDef> const_nodes;
TF_RETURN_IF_ERROR( EvaluateOneFoldable(*node, &const_nodes, result_too_large));
VLOG(2) << "Folded node: " << SummarizeNodeDef(*node);
NodeDef* constant_output = nullptr;
for (int i = 0, end = const_nodes.size(); i < end; i++) {
// (continuation of ConstantFolding::FoldNode: install the evaluated constants
// and rewire consumers of the folded node)
NodeDef* const_node = &const_nodes[i];
VLOG(3) << "Generated constant node: " << SummarizeNodeDef(*const_node);
if (const_node->name().empty()) {
// Dead output placeholder from EvaluateOneFoldable; skip.
continue; }
const auto is_duplicate_control_input = [&](const string& input) -> bool {
auto it = absl::c_find(const_node->input(), input);
return it != const_node->input().end(); };
// Carry over the folded node's control inputs (and the fanins of its data
// inputs) so ordering constraints survive folding.
for (const string& input : node->input()) {
if (IsControlInput(input)) { if (!is_duplicate_control_input(input)) { *const_node->add_input() = input; } }
if (!IsControlInput(input)) { NodeDef* input_node = node_map_->GetNode(input);
for (const string& fanin_of_input : input_node->input()) {
if (!is_duplicate_control_input(fanin_of_input)) { *const_node->add_input() = fanin_of_input; } } } }
if (const_nodes.size() == 1) {
// Single output: rewrite the node itself into the Const in place.
node->set_op("Const");
node_map_->RemoveInputs(node->name());
node->clear_input();
*node->mutable_input() = const_node->input();
for (const auto& input : node->input()) { node_map_->AddOutput(NodeName(input), node->name()); }
*node->mutable_attr() = const_node->attr();
break;
} else {
// Multiple outputs: add each constant as a new node in the output graph.
if (node_map_->GetNode(const_node->name())) {
return absl::AlreadyExistsError(strings::StrCat( const_node->name(), " already present in the graph")); }
NodeDef* added_node = output_graph->add_node();
*added_node = *const_node;
added_node->set_device(node->device());
node_map_->AddNode(added_node->name(), added_node);
for (const auto& input : added_node->input()) { node_map_->AddOutput(NodeName(input), added_node->name()); }
constant_output = added_node; } }
if (const_nodes.size() > 1) {
auto outputs = node_map_->GetOutputs(node->name());
for (NodeDef* output : outputs) { for (int i = 0; i < output->input_size(); i++) { int port;
string node_name = ParseNodeName(output->input(i), &port);
if (node_name == node->name()) {
if (port < 0) {
// Control edge: repoint it at one of the new constants.
if (constant_output != nullptr) {
node_map_->UpdateInput(node_name, NodeName(output->input(i)), constant_output->name());
*output->mutable_input(i) = AsControlDependency(*constant_output); }
} else if (port < static_cast<int>(const_nodes.size()) && !const_nodes[port].name().empty()) {
node_map_->UpdateInput(output->name(), NodeName(output->input(i)), const_nodes[port].name());
*output->mutable_input(i) = const_nodes[port].name();
} else {
VLOG(3) << "Preserving edge from " << node->name() << ":" << port << "[" << node->op() << "] to " << output->name() << ":" << i << "[" << output->op() << "]"; } } } }
outputs = node_map_->GetOutputs(node->name());
if (outputs.empty() && has_fetch_ && nodes_to_preserve_.find(node->name()) == nodes_to_preserve_.end()) {
// The folded node is now dead; drop its inputs.
node_map_->RemoveInputs(node->name());
node->clear_input(); } }
return absl::OkStatus(); }
// Worklist-driven folding pass: seeds the queue with all foldable nodes, folds
// each once, re-enqueues foldable fanouts of successful folds, then copies the
// surviving (still-consumed or preserved) nodes into optimized_graph, dropping
// dead ones. Nodes whose folded result was too large are remembered in
// nodes_to_not_simplify.
Status ConstantFolding::FoldGraph( const GraphProperties& properties, GraphDef* optimized_graph, absl::flat_hash_set<string>* nodes_to_not_simplify) {
absl::flat_hash_set<string> processed_nodes;
std::deque<NodeDef*> queue;
for (int i = 0; i < graph_->node_size(); i++) { const NodeDef& node = graph_->node(i);
if (IsFoldable(node, &properties) && !nodes_to_not_simplify->count(node.name())) { queue.push_back(graph_->mutable_node(i)); } }
while (!queue.empty()) { NodeDef* node = queue.front();
queue.pop_front();
if (processed_nodes.count(node->name())) { continue; }
std::vector<NodeDef*> fanout = node_map_->GetOutputsOrderedByNodeName(node->name());
bool result_too_large = false;
Status s = FoldNode(node, optimized_graph, &result_too_large);
processed_nodes.insert(node->name());
if (!s.ok()) { VLOG(1) << "Failed to fold node " << node->DebugString() << "\nError message: " << s;
if (result_too_large) { nodes_to_not_simplify->emplace(node->name()); }
} else { for (auto& fanout_node : fanout) {
if (IsFoldable(*fanout_node, &properties) && !nodes_to_not_simplify->count(fanout_node->name())) { queue.push_back(fanout_node); } } } }
std::vector<int> nodes_to_delete;
for (int i = 0; i < optimized_graph->node_size(); i++) {
const auto& fanout = node_map_->GetOutputs(optimized_graph->node(i).name());
if (fanout.empty()) nodes_to_delete.push_back(i); }
EraseNodesFromGraph(std::move(nodes_to_delete), optimized_graph);
for (int i = 0; i < graph_->node_size(); ++i) { NodeDef* node = graph_->mutable_node(i);
const auto& fanout = node_map_->GetOutputs(node->name());
if (!fanout.empty() || !has_fetch_ || nodes_to_preserve_.find(node->name()) != nodes_to_preserve_.end()) {
*(optimized_graph->add_node()) = std::move(*node); } }
return absl::OkStatus(); }
// Returns OK iff `node` is a Reshape whose constant target shape is provably
// compatible with the (fully-defined) inferred input shape — i.e. the Reshape
// can be replaced by an Identity. Any failed precondition yields an error
// status describing why.
Status ConstantFolding::IsSimplifiableReshape( const NodeDef& node, const GraphProperties& properties) const {
if (!IsReshape(node)) { return absl::InternalError( absl::StrCat("Node ", node.name(), " is not a Reshape node")); }
if (2 > node.input_size()) { return absl::InternalError(absl::StrCat( "Node ", node.name(), " must have at most 2 inputs but has ", node.input_size())); }
const NodeDef* new_shape = node_map_->GetNode(node.input(1));
if (!IsReallyConstant(*new_shape)) { return absl::InternalError(absl::StrCat("Node ", node.name(), " has shape ", new_shape->DebugString(), " which is not a constant")); }
TensorVector outputs;
auto outputs_cleanup = gtl::MakeCleanup([&outputs] { for (const auto& output : outputs) { delete output.tensor; } });
Status s = EvaluateNode(*new_shape, TensorVector(), &outputs);
if (!s.ok()) { return absl::InternalError( absl::StrCat("Could not evaluate node ", node.name())); }
if (outputs.size() != 1) { return absl::InternalError( absl::StrCat("Node ", node.name(), " must have exactly 1 output but has ", outputs.size())); }
const std::vector<OpInfo::TensorProperties>& props = properties.GetInputProperties(node.name());
if (props.empty()) { return absl::InternalError( absl::StrCat("Node ", node.name(), " has no properties")); }
const OpInfo::TensorProperties& prop = props[0];
if (prop.dtype() == DT_INVALID) { return absl::InternalError( absl::StrCat("Node ", node.name(), " has property ", prop.DebugString(), " with invalid dtype")); }
const PartialTensorShape shape(prop.shape());
if (!shape.IsFullyDefined()) { return absl::InternalError(absl::StrCat( "Node ", node.name(), " has property ", prop.DebugString(), " with shape ", shape.DebugString(), " which is not fully defined")); }
PartialTensorShape new_dims;
if (outputs[0]->dtype() == DT_INT32) { std::vector<int32> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) { int32_t dim = outputs[0]->flat<int32>()(i);
shp.push_back(dim); }
s = TensorShapeUtils::MakeShape(shp, &new_dims);
if (!s.ok()) return s;
} else { std::vector<int64_t> shp;
for (int i = 0; i < outputs[0]->NumElements(); ++i) { int64_t dim = outputs[0]->flat<int64_t>()(i);
shp.push_back(dim); }
s = TensorShapeUtils::MakeShape(shp, &new_dims);
if (!s.ok()) return s; }
if (!shape.IsCompatibleWith(new_dims)) { return absl::InternalError( absl::StrCat("Expected shape ", shape.DebugString(), "to be compatible with ", new_dims.DebugString())); }
return absl::OkStatus(); }
// Case helpers: true when every element of the node's "value" tensor equals
// VALUE (used by IsOnes/IsZeros below).
#define IS_VALUE_CASE(DTYPE, VALUE)                   \
  case DTYPE:                                         \
    return AllValuesAre<EnumToDataType<DTYPE>::Type>( \
        node.attr().at("value").tensor(), EnumToDataType<DTYPE>::Type(VALUE))
#define IS_ONES_CASE(TYPE) IS_VALUE_CASE(TYPE, 1)
#define IS_ZEROS_CASE(TYPE) IS_VALUE_CASE(TYPE, 0)
// True when `node` is provably an all-ones tensor (OnesLike, Fill of ones, or
// a Const whose every element is 1); feed nodes are never treated as known.
bool ConstantFolding::IsOnes(const NodeDef& node) const {
if (feed_nodes_.find(node.name()) != feed_nodes_.end()) { return false; }
if (IsOnesLike(node)) return true;
if (IsZerosLike(node)) return false;
if (node.op() == "Fill") { NodeDef* values = node_map_->GetNode(NodeName(node.input(1)));
return values != nullptr && IsOnes(*values); }
if (node.op() != "Const") return false;
if (node.attr().count("dtype") == 0) return false;
const auto dtype = node.attr().at("dtype").type();
switch (dtype) {
IS_ONES_CASE(DT_BOOL);
IS_ONES_CASE(DT_HALF);
IS_ONES_CASE(DT_BFLOAT16);
IS_ONES_CASE(DT_FLOAT);
IS_ONES_CASE(DT_DOUBLE);
IS_ONES_CASE(DT_COMPLEX64);
IS_ONES_CASE(DT_COMPLEX128);
IS_ONES_CASE(DT_UINT8);
IS_ONES_CASE(DT_INT8);
IS_ONES_CASE(DT_UINT16);
IS_ONES_CASE(DT_INT16);
IS_ONES_CASE(DT_INT32);
IS_ONES_CASE(DT_INT64);
IS_ONES_CASE(DT_QINT32); IS_ONES_CASE(DT_QINT16); IS_ONES_CASE(DT_QUINT16); IS_ONES_CASE(DT_QINT8); IS_ONES_CASE(DT_QUINT8); default: VLOG(1) << "Unsupported type " << DataTypeString(dtype); return false; } return false; } bool ConstantFolding::IsZeros(const NodeDef& node) const { if (feed_nodes_.find(node.name()) != feed_nodes_.end()) { return false; } if (IsOnesLike(node)) return false; if (IsZerosLike(node)) return true; if (node.op() == "Fill") { NodeDef* values = node_map_->GetNode(NodeName(node.input(1))); return values != nullptr && IsZeros(*values); } if (!IsConstant(node)) return false; if (node.attr().count("dtype") == 0) return false; const auto dtype = node.attr().at("dtype").type(); switch (dtype) { IS_ZEROS_CASE(DT_BOOL); IS_ZEROS_CASE(DT_HALF); IS_ZEROS_CASE(DT_BFLOAT16); IS_ZEROS_CASE(DT_FLOAT); IS_ZEROS_CASE(DT_DOUBLE); IS_ZEROS_CASE(DT_COMPLEX64); IS_ZEROS_CASE(DT_COMPLEX128); IS_ZEROS_CASE(DT_UINT8); IS_ZEROS_CASE(DT_INT8); IS_ZEROS_CASE(DT_UINT16); IS_ZEROS_CASE(DT_INT16); IS_ZEROS_CASE(DT_INT32); IS_ZEROS_CASE(DT_INT64); IS_ZEROS_CASE(DT_QINT32); IS_ZEROS_CASE(DT_QINT16); IS_ZEROS_CASE(DT_QUINT16); IS_ZEROS_CASE(DT_QINT8); IS_ZEROS_CASE(DT_QUINT8); default: VLOG(1) << "Unsupported type " << DataTypeString(dtype); return false; } return false; } bool ConstantFolding::ReplaceOperationWithBroadcastTo( int input_to_broadcast, const GraphProperties& properties, NodeDef* node, GraphDef* graph) { const DataType dtype = GetDataTypeFromNodeOrProps(*node, properties); if (dtype == DT_INVALID) { return false; } const PartialTensorShape shape( properties.GetOutputProperties(node->name())[0].shape()); if (!shape.IsFullyDefined()) { return false; } const string const_name = OptimizedNodeName( *node, strings::StrCat("-broadcastto_shape-", input_to_broadcast)); if (node_map_->GetNode(const_name) != nullptr) { return false; } Tensor shape_t; if (!ConvertShapeToConstant("Shape", DT_INT32, shape, &shape_t).ok()) { return false; } NodeDef tmp; if 
(!CreateNodeDef(const_name, TensorValue(&shape_t), &tmp).ok()) { return false; } NodeDef* const_node = graph->add_node(); const_node->Swap(&tmp); const_node->set_device(node->device()); node_map_->AddNode(const_name, const_node); for (int i = 0; i < node->input_size(); ++i) { if (i != input_to_broadcast) { string ctrl_dep = AddControlDependency(NodeName(node->input(i)), graph, node_map_.get()); *const_node->add_input() = ctrl_dep; node_map_->AddOutput(NodeName(ctrl_dep), const_name); } } node->set_op("BroadcastTo"); EraseRegularNodeAttributes(node); (*node->mutable_attr())["T"].set_type(dtype); (*node->mutable_attr())["Tidx"].set_type(DT_INT32); node->mutable_input()->SwapElements(0, input_to_broadcast); for (int i = 1; i < node->input_size(); ++i) { if (IsControlInput(node->input(i))) { break; } const string ctrl_dep = AddControlDependency(node->input(i), graph, node_map_.get()); node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep); node->set_input(i, ctrl_dep); } *node->add_input() = const_node->name(); node_map_->AddOutput(const_name, node->name()); node->mutable_input()->SwapElements(1, node->input_size() - 1); return true; } void ConstantFolding::ReplaceOperationWithIdentity( int input_to_forward, const GraphProperties& properties, NodeDef* node, GraphDef* graph) { if (input_to_forward < 0 || input_to_forward >= node->input_size()) return; const DataType dtype = GetDataTypeFromNodeOrProps(*node, properties); if (dtype == DT_INVALID) return; node->set_op("Identity"); EraseRegularNodeAttributes(node); (*node->mutable_attr())["T"].set_type(dtype); node->mutable_input()->SwapElements(0, input_to_forward); for (int i = 1; i < node->input_size(); ++i) { if (IsControlInput(node->input(i))) { break; } const string ctrl_dep = AddControlDependency(node->input(i), graph, node_map_.get()); node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep); node->set_input(i, ctrl_dep); } graph_modified_ = true; } void ConstantFolding::ReplaceOperationWithSnapshot( 
    // (continuation of ReplaceOperationWithSnapshot's parameter list, whose
    // declaration starts on the previous chunk line)
    int input_to_forward, const GraphProperties& properties, NodeDef* node,
    GraphDef* graph) {
  // Identity is cheaper and equivalent when the graph has no assign/in-place
  // ops that could mutate the forwarded buffer.
  if (!graph_contains_assign_or_inplace_op_) {
    ReplaceOperationWithIdentity(input_to_forward, properties, node, graph);
    return;
  }
  const DataType dtype = GetDataTypeFromNodeOrProps(*node, properties);
  if (dtype == DT_INVALID) return;

  node->set_op("Snapshot");
  EraseRegularNodeAttributes(node);
  (*node->mutable_attr())["T"].set_type(dtype);
  // Forwarded input goes to slot 0; remaining regular inputs become control
  // dependencies.
  node->mutable_input()->SwapElements(0, input_to_forward);
  for (int i = 1; i < node->input_size(); ++i) {
    if (IsControlInput(node->input(i))) {
      break;
    }
    const string ctrl_dep =
        AddControlDependency(node->input(i), graph, node_map_.get());
    node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep);
    node->set_input(i, ctrl_dep);
  }
  graph_modified_ = true;
}

// Rewrites `node` into a NoOp: all regular inputs become control
// dependencies and output properties are cleared. Refuses to act if the node
// still has regular (data) fanout, since a NoOp produces no outputs.
void ConstantFolding::ReplaceOperationWithNoOp(NodeDef* node,
                                               GraphProperties* properties,
                                               GraphDef* graph) {
  if (HasRegularOutputs(*node, *node_map_)) return;
  ChangeToNoOp(node);
  EraseRegularNodeAttributes(node);
  EraseNodeOutputAttributes(node);
  properties->ClearOutputProperties(node->name());
  for (int i = 0; i < node->input_size(); ++i) {
    if (IsControlInput(node->input(i))) {
      break;
    }
    const string ctrl_dep =
        AddControlDependency(node->input(i), graph, node_map_.get());
    node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep);
    node->set_input(i, ctrl_dep);
  }
  DedupControlInputs(node);
  graph_modified_ = true;
}

// Wrapper around ReplaceOperationWithBroadcastTo that records graph
// modification on success.
void ConstantFolding::ReplaceBinaryOperationWithBroadcastTo(
    int input_to_broadcast, const GraphProperties& properties, NodeDef* node,
    GraphDef* graph) {
  if (!ReplaceOperationWithBroadcastTo(input_to_broadcast, properties, node,
                                       graph)) {
    return;
  }
  graph_modified_ = true;
}

// Rewrites `ones / x` into `Reciprocal(x)`: input 1 (the denominator) is
// swapped into slot 0 and the former numerator becomes a control dependency.
void ConstantFolding::ReplaceDivisionOfOnesByReciprocal(NodeDef* node,
                                                        GraphDef* graph) {
  node->set_op("Reciprocal");
  node->mutable_input()->SwapElements(0, 1);
  const string ctrl_dep =
      AddControlDependency(node->input(1), graph, node_map_.get());
  node_map_->UpdateInput(node->name(), node->input(1), ctrl_dep);
  node->set_input(1, ctrl_dep);
  graph_modified_ = true;
}

// Rewrites `0 - x` into `Neg(x)`: input 1 is swapped into slot 0 and the
// former zero operand becomes a control dependency.
void ConstantFolding::ReplaceSubtractionFromZeroByNegation(NodeDef* node,
                                                           GraphDef* graph) {
  node->set_op("Neg");
  node->mutable_input()->SwapElements(0, 1);
  const string ctrl_dep =
      AddControlDependency(node->input(1), graph, node_map_.get());
  node_map_->UpdateInput(node->name(), node->input(1), ctrl_dep);
  node->set_input(1, ctrl_dep);
  graph_modified_ = true;
}

// Rewrites `node` into a Const holding `value` (the tensor proto is swapped
// in, so `value` is consumed). All regular inputs become control
// dependencies. DT_VARIANT nodes are left untouched.
Status ConstantFolding::ReplaceOperationWithConstantTensor(DataType dtype,
                                                           TensorProto* value,
                                                           NodeDef* node,
                                                           GraphDef* graph) {
  if (dtype == DT_VARIANT) return absl::OkStatus();
  node->set_op("Const");
  EraseRegularNodeAttributes(node);
  (*node->mutable_attr())["dtype"].set_type(dtype);
  (*node->mutable_attr())["value"].mutable_tensor()->Swap(value);
  for (int i = 0; i < node->input_size(); ++i) {
    if (IsControlInput(node->input(i))) {
      break;
    }
    const string ctrl_dep =
        AddControlDependency(node->input(i), graph, node_map_.get());
    node_map_->UpdateInput(node->name(), node->input(i), ctrl_dep);
    node->set_input(i, ctrl_dep);
  }
  DedupControlInputs(node);
  graph_modified_ = true;
  return absl::OkStatus();
}

// Rewrites `node` into a constant tensor of the given scalar `value`
// broadcast to `shape`. Failure to build the constant is logged but not
// propagated (the node is simply left unoptimized).
Status ConstantFolding::ReplaceOperationWithConstant(
    double value, const GraphProperties& properties,
    const TensorShapeProto& shape, NodeDef* node, GraphDef* graph) {
  const DataType dtype = GetDataTypeFromNodeOrProps(*node, properties);
  if (dtype == DT_VARIANT) return absl::OkStatus();
  AttrValue tensor_attr;
  Status s = CreateConstantTensorAttrValue(dtype, value, shape, &tensor_attr);
  if (!s.ok()) {
    // Not all dtypes can hold `value`; skip the rewrite rather than fail.
    VLOG(1) << "Failed to replace node " << node->name() << " of type "
            << DataTypeString(dtype) << " with constant tensor of value "
            << value;
    return absl::OkStatus();
  }
  return ReplaceOperationWithConstantTensor(dtype, tensor_attr.mutable_tensor(),
                                            node, graph);
}

// Runs SimplifyNode over every node in the graph, skipping nodes in
// `nodes_to_not_simplify`. Nodes with TPU attributes are added to the skip
// set and left alone. (Body continues on the next chunk line.)
Status ConstantFolding::SimplifyGraph(
    GraphDef* optimized_graph, GraphProperties* properties,
    absl::flat_hash_set<string>* nodes_to_not_simplify) {
  for (int i = 0; i < optimized_graph->node_size(); ++i) {
    // (continuation of SimplifyGraph's loop body from the previous chunk line)
    NodeDef* node = optimized_graph->mutable_node(i);
    if (nodes_to_not_simplify->find(node->name()) ==
        nodes_to_not_simplify->end()) {
      if (HasTPUAttributes(*node)) {
        // Never rewrite TPU-annotated nodes; remember the decision.
        nodes_to_not_simplify->insert(node->name());
        continue;
      }
      TF_RETURN_IF_ERROR(SimplifyNode(node, optimized_graph, properties));
    }
  }
  return absl::OkStatus();
}

// Dispatch helpers for SimplifyNode: each returns early from the enclosing
// function as soon as one simplification fires, so at most one rewrite is
// applied per SimplifyNode call.
#define RETURN_IF_ERROR_OR_MODIFIED(EXPR) \
  TF_RETURN_IF_ERROR(EXPR);               \
  if (graph_modified_) return OkStatus()

#define SET_AND_RETURN_IF_MODIFIED(EXPR) \
  graph_modified_ = EXPR;                \
  if (graph_modified_) return OkStatus()

#define RETURN_IF_MODIFIED(EXPR) \
  EXPR;                          \
  if (graph_modified_) return OkStatus()

// Tries each simplification in a fixed order and stops at the first one that
// modifies the graph. `graph_modified_` is saved/restored around the attempts
// so a non-modifying call does not clobber state set by earlier nodes.
Status ConstantFolding::SimplifyNode(NodeDef* node, GraphDef* optimized_graph,
                                     GraphProperties* properties) {
  bool graph_modified_cached = graph_modified_;
  graph_modified_ = false;

  bool use_shape_info = properties->has_properties();
  RETURN_IF_MODIFIED(RemoveSplitOrSplitV(*properties, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(RemoveShuffleOrTranspose(
      *properties, use_shape_info, optimized_graph, node));
  RETURN_IF_MODIFIED(
      RemoveRandomShuffle(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(
      RemoveReverse(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(
      SimplifySlice(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(
      SimplifyStridedSlice(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(
      SimplifyTile(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_ERROR_OR_MODIFIED(
      SimplifyPad(*properties, use_shape_info, optimized_graph, node));
  RETURN_IF_MODIFIED(
      SimplifySqueeze(*properties, use_shape_info, optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(SimplifyPack(optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(MoveConstantsPastEnter(optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(SimplifySwitch(optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(
      SimplifyReduction(optimized_graph, *properties, node));
  SET_AND_RETURN_IF_MODIFIED(
      SimplifyReshape(*properties, use_shape_info, node));
  RETURN_IF_ERROR_OR_MODIFIED(SimplifyArithmeticOperations(
      *properties, use_shape_info, optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(ReduceDivToReciprocalMul(optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(
      ConstantPushDown(properties, optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(
      MulConvPushDown(optimized_graph, node, *properties));
  SET_AND_RETURN_IF_MODIFIED(PartialConstPropThroughIdentityN(node));
  SET_AND_RETURN_IF_MODIFIED(
      PartialAssocOpConstFolding(optimized_graph, properties, node));
  SET_AND_RETURN_IF_MODIFIED(
      MergeConcat(use_shape_info, properties, optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(
      PartialConcatConstFolding(optimized_graph, properties, node));
  SET_AND_RETURN_IF_MODIFIED(
      ConstantPushDownBiasAdd(properties, optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(SimplifyCase(optimized_graph, node));
  SET_AND_RETURN_IF_MODIFIED(
      SimplifySelect(*properties, optimized_graph, node));
  RETURN_IF_MODIFIED(
      RemoveRedundantVariableUpdates(properties, optimized_graph, node));

  // Nothing fired: restore the flag recorded before the attempts.
  graph_modified_ = graph_modified_cached;
  return absl::OkStatus();
}

// A Split/SplitV with num_split == 1 is a pass-through; forward the data
// input (index 1 for Split, 0 for SplitV — the split dimension occupies the
// other slot).
void ConstantFolding::RemoveSplitOrSplitV(const GraphProperties& properties,
                                          GraphDef* optimized_graph,
                                          NodeDef* node) {
  if (node->attr().count("num_split") == 0) return;
  if (IsSplit(*node) && node->attr().at("num_split").i() == 1) {
    ReplaceOperationWithIdentity(1, properties, node, optimized_graph);
  }
  if (IsSplitV(*node) && node->attr().at("num_split").i() == 1) {
    ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
  }
}

// Replaces a Shuffle/Transpose with Identity when the (constant) permutation
// only moves dimensions of size 1, i.e. the op cannot change the data layout.
// (Body continues on the next chunk line.)
Status ConstantFolding::RemoveShuffleOrTranspose(
    const GraphProperties& properties, bool use_shape_info,
    GraphDef* optimized_graph, NodeDef* node) {
  if (!use_shape_info || !(IsShuffle(*node) || IsTranspose(*node)))
    return absl::OkStatus();
  Tensor permutation_tensor;
  if (GetTensorFromConstNode(node->input(1), &permutation_tensor) &&
      properties.HasInputProperties(node->name())) {
    // (continuation of RemoveShuffleOrTranspose from the previous chunk line)
    const auto& shape = properties.GetInputProperties(node->name())[0].shape();
    std::vector<int> permutation;
    for (int j = 0; j < permutation_tensor.NumElements(); ++j) {
      if (permutation_tensor.dtype() == DT_INT64) {
        permutation.push_back(permutation_tensor.vec<int64_t>()(j));
      } else {
        permutation.push_back(permutation_tensor.vec<int>()(j));
      }
    }
    int permutation_size = permutation.size();
    if (permutation_size != shape.dim_size()) {
      // Malformed permutation; leave the node alone.
      return absl::OkStatus();
    }
    // Replaceable iff every moved dimension has size 1.
    bool replaceable = true;
    for (int j = 0; replaceable && j < shape.dim_size(); ++j) {
      replaceable &= shape.dim(j).size() == 1 || j == permutation[j];
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// A RandomShuffle over a scalar or a first dimension of size 1 has nothing
// to shuffle; replace it with Identity.
void ConstantFolding::RemoveRandomShuffle(const GraphProperties& properties,
                                          bool use_shape_info,
                                          GraphDef* optimized_graph,
                                          NodeDef* node) {
  if (use_shape_info && IsRandomShuffle(*node) &&
      !properties.GetInputProperties(node->name()).empty()) {
    const auto& shape = properties.GetInputProperties(node->name())[0].shape();
    if (!shape.unknown_rank() &&
        (shape.dim_size() == 0 || shape.dim(0).size() == 1)) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
}

// Replaces a ReverseV2 with Identity when every reversed axis has size 1
// (reversing a unit dimension is a no-op). Negative axes in the constant
// axis tensor are normalized modulo the rank.
Status ConstantFolding::RemoveReverse(const GraphProperties& properties,
                                      bool use_shape_info,
                                      GraphDef* optimized_graph,
                                      NodeDef* node) {
  if (!use_shape_info || node->op() != "ReverseV2") return absl::OkStatus();
  Tensor axis;
  if (properties.HasInputProperties(node->name()) &&
      GetTensorFromConstNode(node->input(1), &axis)) {
    const auto& shape = properties.GetInputProperties(node->name())[0].shape();
    if (shape.unknown_rank()) return absl::OkStatus();
    std::set<int> target_axes;
    for (int j = 0; j < axis.NumElements(); ++j) {
      // Normalize negative axis indices into [0, rank).
      if (axis.dtype() == DT_INT64) {
        target_axes.insert((axis.vec<int64_t>()(j) + shape.dim_size()) %
                           shape.dim_size());
      } else {
        target_axes.insert((axis.vec<int>()(j) + shape.dim_size()) %
                           shape.dim_size());
      }
    }
    // Replaceable iff every axis being reversed has size 1.
    bool replaceable = true;
    for (int j = 0; replaceable && j < shape.dim_size(); ++j) {
      replaceable &= shape.dim(j).size() == 1 ||
                     target_axes.find(j) == target_axes.end();
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// Replaces a Slice with Identity when the constant begin is all zeros and
// the constant size covers the whole input (either -1 or the full dim size
// in every dimension).
Status ConstantFolding::SimplifySlice(const GraphProperties& properties,
                                      bool use_shape_info,
                                      GraphDef* optimized_graph,
                                      NodeDef* node) {
  if (!use_shape_info || !IsSlice(*node)) return absl::OkStatus();
  Tensor begin;
  Tensor size;
  if (properties.HasInputProperties(node->name()) &&
      GetTensorFromConstNode(node->input(1), &begin) &&
      GetTensorFromConstNode(node->input(2), &size)) {
    const auto& input = properties.GetInputProperties(node->name())[0];
    bool replaceable = !input.shape().unknown_rank();
    for (int j = 0; replaceable && j < input.shape().dim_size(); ++j) {
      if (begin.dtype() == DT_INT32) {
        replaceable &= begin.vec<int>()(j) == 0;
      } else {
        replaceable &= begin.vec<int64_t>()(j) == 0;
      }
      if (size.dtype() == DT_INT32) {
        replaceable &= (size.vec<int>()(j) == -1 ||
                        size.vec<int>()(j) == input.shape().dim(j).size());
      } else {
        replaceable &= (size.vec<int64_t>()(j) == -1 ||
                        size.vec<int64_t>()(j) == input.shape().dim(j).size());
      }
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// Replaces a StridedSlice with Identity when it provably selects the whole
// input: no new/shrink axes, fully known input shape, constant
// begin/end/strides, stride 1 everywhere, and begin/end covering each
// dimension (directly or via begin_mask/end_mask/ellipsis_mask).
// (Body continues on the next chunk line.)
Status ConstantFolding::SimplifyStridedSlice(const GraphProperties& properties,
                                             bool use_shape_info,
                                             GraphDef* optimized_graph,
                                             NodeDef* node) {
  if (use_shape_info && IsStridedSlice(*node) &&
      properties.GetInputProperties(node->name()).size() == 4) {
    TF_RETURN_IF_ERROR(
        CheckAttrsExist(*node, {"new_axis_mask", "shrink_axis_mask"}));
    if (node->attr().at("new_axis_mask").i() != 0 ||
        node->attr().at("shrink_axis_mask").i() != 0) {
      // Rank-changing slices are never the identity.
      return absl::OkStatus();
    }
    const auto& input = properties.GetInputProperties(node->name())[0];
    for (int j = 0; j < input.shape().dim_size(); ++j) {
      // Unknown dimension sizes prevent proving full coverage.
      if (input.shape().dim(j).size() < 0) {
        return absl::OkStatus();
      }
    }
    std::vector<Tensor> input_tensors(3);
    for (int i = 1; i <
         // (continuation of SimplifyStridedSlice: fetch the constant
         // begin/end/strides inputs 1..3)
         4; ++i) {
      if (!GetTensorFromConstNode(node->input(i), &input_tensors[i - 1])) {
        return absl::OkStatus();
      }
    }
    const Tensor& begin = input_tensors[0];
    const Tensor& end = input_tensors[1];
    const Tensor& strides = input_tensors[2];
    TF_RETURN_IF_ERROR(
        CheckAttrsExist(*node, {"begin_mask", "end_mask", "ellipsis_mask"}));
    int begin_mask = node->attr().at("begin_mask").i();
    int end_mask = node->attr().at("end_mask").i();
    // Locate an ellipsis (explicit via ellipsis_mask, or implicit when the
    // slice spec is shorter than the input rank) and record which input
    // dimensions it expands to.
    std::set<int> expanded_ellipsis_indices;
    int ellipsis_index = -1;
    for (int j = 0; j < input.shape().dim_size(); ++j) {
      if (node->attr().at("ellipsis_mask").i() & 1 << j ||
          (ellipsis_index == -1 && j >= strides.NumElements())) {
        ellipsis_index = j;
      }
      if (ellipsis_index != -1 &&
          input.shape().dim_size() >
              strides.NumElements() + j - ellipsis_index) {
        expanded_ellipsis_indices.insert(j);
      }
    }
    bool replaceable = !input.shape().unknown_rank();
    for (int j = 0; replaceable && j < input.shape().dim_size(); ++j) {
      if (expanded_ellipsis_indices.find(j) !=
          expanded_ellipsis_indices.end()) {
        // Dimensions absorbed by the ellipsis are fully covered by definition.
        continue;
      }
      // Map input dimension j back to its index i in the slice spec,
      // accounting for dimensions swallowed by the ellipsis.
      int i = j;
      int expanded_ellipsis_indices_size = expanded_ellipsis_indices.size();
      if (ellipsis_index != -1 &&
          j >= ellipsis_index + expanded_ellipsis_indices_size) {
        i = j - expanded_ellipsis_indices_size;
      }
      int b = begin.dtype() == DT_INT32 ? begin.vec<int>()(i)
                                        : begin.vec<int64_t>()(i);
      int e =
          end.dtype() == DT_INT32 ? end.vec<int>()(i) : end.vec<int64_t>()(i);
      int s = strides.dtype() == DT_INT32 ? strides.vec<int>()(i)
                                          : strides.vec<int64_t>()(i);
      // Full coverage: begin at 0 (or masked), end at dim size (or masked),
      // stride exactly 1.
      replaceable &= (begin_mask & 1 << i || b == 0) &&
                     (end_mask & 1 << i || e == input.shape().dim(j).size()) &&
                     s == 1;
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// Replaces a Tile whose constant multiples are all 1 with Identity.
Status ConstantFolding::SimplifyTile(const GraphProperties& properties,
                                     bool use_shape_info,
                                     GraphDef* optimized_graph,
                                     NodeDef* node) {
  Tensor multiplies;
  if (use_shape_info && IsTile(*node) &&
      GetTensorFromConstNode(node->input(1), &multiplies)) {
    bool replaceable = true;
    if (multiplies.dtype() == DT_INT32) {
      for (int j = 0; replaceable && j < multiplies.vec<int>().size(); ++j) {
        replaceable &= multiplies.vec<int>()(j) == 1;
      }
    } else {
      for (int j = 0; replaceable && j < multiplies.vec<int64_t>().size();
           ++j) {
        replaceable &= multiplies.vec<int64_t>()(j) == 1;
      }
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// Replaces a Pad whose constant paddings are all zero with Identity.
Status ConstantFolding::SimplifyPad(const GraphProperties& properties,
                                    bool use_shape_info,
                                    GraphDef* optimized_graph, NodeDef* node) {
  if (!use_shape_info || !IsPad(*node)) return absl::OkStatus();

  Tensor paddings;
  if (GetTensorFromConstNode(node->input(1), &paddings)) {
    bool replaceable = true;
    if (paddings.dtype() == DT_INT32) {
      const auto flatten = paddings.flat<int32>();
      for (int j = 0; replaceable && j < flatten.size(); ++j) {
        replaceable &= flatten(j) == 0;
      }
    } else {
      const auto flatten = paddings.flat<int64_t>();
      for (int j = 0; replaceable && j < flatten.size(); ++j) {
        replaceable &= flatten(j) == 0;
      }
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
  return absl::OkStatus();
}

// Replaces a Squeeze with Identity when every input dimension is known to be
// > 1, so there is provably nothing to squeeze.
void ConstantFolding::SimplifySqueeze(const GraphProperties& properties,
                                      bool use_shape_info,
                                      GraphDef* optimized_graph,
                                      NodeDef* node) {
  if (use_shape_info && IsSqueeze(*node) &&
      !properties.GetInputProperties(node->name()).empty()) {
    const auto& shape = properties.GetInputProperties(node->name())[0].shape();
    bool replaceable = !shape.unknown_rank();
    for (int j = 0; replaceable && j < shape.dim_size(); ++j) {
      replaceable &= shape.dim(j).size() > 1;
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
}

// Rewrites a single-input Pack into ExpandDims with a new constant axis
// node. Skips fed inputs and bails if the axis constant already exists.
bool ConstantFolding::SimplifyPack(GraphDef* optimized_graph, NodeDef* node) {
  const string axis_node_name = OptimizedNodeName(*node, "_const_axis");
  if (!IsPack(*node) || NumNonControlInputs(*node) != 1 ||
      node_map_->NodeExists(axis_node_name)) {
    return false;
  }

  // Don't optimize the case the input is fed at runtime.
  if (feed_nodes_.find(NodeName(node->input(0))) != feed_nodes_.end()) {
    return false;
  }

  // Create the constant axis node (defaults to axis 0 when unset).
  Tensor axis_t(DT_INT32, TensorShape({}));
  const int axis =
      node->attr().count("axis") == 0 ? 0 : node->attr().at("axis").i();
  NodeDef new_node;
  if (!SetTensorValue(DT_INT32, axis, &axis_t).ok() ||
      !CreateNodeDef(axis_node_name, TensorValue(&axis_t), &new_node).ok()) {
    return false;
  }
  NodeDef* axis_node = optimized_graph->add_node();
  *axis_node = std::move(new_node);
  axis_node->set_name(axis_node_name);
  node_map_->AddNode(axis_node->name(), axis_node);
  // Add a control dependency to make sure the axis constant is executed in
  // the same frame as its input.
  const string ctrl_dep = ConstantFolding::AddControlDependency(
      node->input(0), optimized_graph, node_map_.get());
  axis_node->add_input(ctrl_dep);
  axis_node->set_device(node->device());
  node_map_->AddOutput(NodeName(node->input(0)), axis_node->name());
  node->set_op("ExpandDims");
  if (node->attr().count("axis") != 0) {
    node->mutable_attr()->erase("axis");
  }
  if (node->attr().count("N") != 0) {
    node->mutable_attr()->erase("N");
  }
  (*node->mutable_attr())["Tdim"].set_type(DT_INT32);
  node->add_input(axis_node->name());
  node_map_->AddOutput(axis_node->name(), node->name());
  if (node->input_size() > 2) {
    // Keep the axis as input 1, ahead of any control inputs.
    node->mutable_input()->SwapElements(1, node->input_size() - 1);
  }
  return true;
}

// When a Case node's branch index is a compile-time constant, replaces the
// Case with a PartitionedCall of the selected branch function.
// (Body continues on the next chunk line.)
bool ConstantFolding::SimplifyCase(GraphDef* optimized_graph, NodeDef* node) {
  if (node->op() != "Case") return false;
  const NodeDef* output_idx_node = node_map_->GetNode(node->input(0));
  if (output_idx_node ==
          // (continuation of SimplifyCase: the branch index must be a
          // constant with a "value" attribute)
          nullptr ||
      !CheckAttrExists(*output_idx_node, "value").ok()) {
    return false;
  }
  Tensor output_idx_t;
  if (!output_idx_t.FromProto(output_idx_node->attr().at("value").tensor()))
    return false;
  int output_idx = output_idx_t.scalar<int>()();
  const auto& func_list = node->attr().at("branches").list();
  if (output_idx < 0 || output_idx >= func_list.func_size()) return false;
  // Rebuild the node as a PartitionedCall of the selected branch; input 0
  // (the branch index) is dropped, the remaining inputs are forwarded.
  NodeDef call_node = *node;
  call_node.set_op("PartitionedCall");
  call_node.clear_input();
  for (int i = 1; i < node->input_size(); ++i) {
    call_node.add_input(node->input(i));
  }
  auto* new_func = (*call_node.mutable_attr())["f"].mutable_func();
  *new_func = func_list.func(output_idx);

  // Preserve the output shape recorded for the chosen branch, if any.
  const auto& output_shape_list =
      (*node->mutable_attr())["output_shapes"].list();
  if (output_shape_list.shape_size() > output_idx) {
    TensorShapeProto* new_output_shape =
        (*call_node.mutable_attr())["_output_shapes"]
            .mutable_list()
            ->add_shape();
    // NOTE(review): std::move on a const accessor yields a copy here, which
    // is harmless but not an actual move.
    *new_output_shape =
        std::move(node->attr().at("output_shapes").list().shape(output_idx));
  }

  call_node.mutable_attr()->erase("output_shapes");
  call_node.mutable_attr()->erase("branches");

  *node = std::move(call_node);
  return true;
}

// Simplifies a Select whose predicate is a constant all-true or all-false
// tensor: either forward the live input via Identity (when shapes match, or
// the predicate is scalar) or broadcast it to the output shape. The dead
// input and the predicate are kept as control dependencies.
bool ConstantFolding::SimplifySelect(const GraphProperties& properties,
                                     GraphDef* optimized_graph,
                                     NodeDef* node) {
  if (!IsSelect(*node)) return false;
  const std::vector<OpInfo::TensorProperties>& input_props =
      properties.GetInputProperties(node->name());
  if (input_props.size() < 3) return false;
  const NodeDef* predicate_node = node_map_->GetNode(node->input(0));
  const bool is_all_true = IsOnes(*predicate_node);
  const bool is_all_false = IsZeros(*predicate_node);
  if (!is_all_true && !is_all_false) {
    return false;
  }
  const int live_input_idx = is_all_true ? 1 : 2;
  const int ignored_input_idx = is_all_true ? 2 : 1;
  const TensorShapeProto& predicate_shape = input_props[0].shape();
  const bool predicate_is_scalar =
      !predicate_shape.unknown_rank() && predicate_shape.dim_size() == 0;
  if (ShapesSymbolicallyEqual(input_props[1], input_props[2]) &&
      (ShapesSymbolicallyEqual(input_props[0], input_props[1]) ||
       predicate_is_scalar)) {
    // Both branches have the output shape: forward the live input directly.
    node->set_op("Identity");
    *node->mutable_input(0) =
        AddControlDependency(node->input(0), optimized_graph, node_map_.get());
    *node->mutable_input(ignored_input_idx) = AddControlDependency(
        node->input(ignored_input_idx), optimized_graph, node_map_.get());
    node->mutable_input()->SwapElements(0, live_input_idx);
  } else if (!ReplaceOperationWithBroadcastTo(live_input_idx, properties,
                                              node, optimized_graph)) {
    return false;
  }
  DedupControlInputs(node);
  return true;
}

// Removes variable updates that provably do nothing: adding/subtracting an
// all-zeros delta, or multiplying/dividing by an all-ones delta. Resource /
// ref-variable flavors become NoOps; the plain ops become Identity.
void ConstantFolding::RemoveRedundantVariableUpdates(
    GraphProperties* properties, GraphDef* optimized_graph, NodeDef* node) {
  // NOTE(review): despite the name, this set lists variable *update* ops.
  static const absl::flat_hash_set<string>* kVariableReadOps =
      new absl::flat_hash_set<string>{"AssignAddVariableOp",
                                      "AssignSubVariableOp",
                                      "AssignAdd",
                                      "AssignSub",
                                      "ScatterAdd",
                                      "ScatterSub",
                                      "ScatterMul",
                                      "ScatterDiv",
                                      "ScatterNdAdd",
                                      "ScatterNdSub",
                                      "ScatterNdMul",
                                      "ScatterNdDiv",
                                      "ResourceScatterAdd",
                                      "ResourceScatterSub",
                                      "ResourceScatterMul",
                                      "ResourceScatterDiv",
                                      "ResourceScatterNdAdd",
                                      "ResourceScatterNdSub",
                                      "ResourceScatterNdMul",
                                      "ResourceScatterNdDiv"};
  // NOTE(review): the nullptr check is dead — `new` cannot return null here.
  if (kVariableReadOps == nullptr ||
      kVariableReadOps->find(node->op()) == kVariableReadOps->end())
    return;
  // Scatter ops take (resource, indices, updates); the others (ref, delta).
  const int value_index = absl::StrContains(node->op(), "Scatter") ? 2 : 1;
  const NodeDef* delta_node = node_map_->GetNode(node->input(value_index));
  if (delta_node == nullptr) return;
  const bool is_add_or_sub = absl::StrContains(node->op(), "Add") ||
                             absl::StrContains(node->op(), "Sub");
  if ((is_add_or_sub && IsZeros(*delta_node)) ||
      (!is_add_or_sub && IsOnes(*delta_node))) {
    VLOG(1) << "Removing redundant variable update: " << node->DebugString();
    if (absl::StrContains(node->op(), "Variable") ||
        absl::StrContains(node->op(), "Resource")) {
      ReplaceOperationWithNoOp(node, properties, optimized_graph);
    } else {
      ReplaceOperationWithIdentity(0 /* input_to_forward */, *properties,
                                   node, optimized_graph);
    }
  }
}

// Hoists a constant feeding a constant-Enter out of the loop frame: a copy
// of the constant is created behind the Enter (via control dependency) and
// non-constant consumers of the Enter are rewired to the copy.
// (Body continues on the next chunk line.)
bool ConstantFolding::MoveConstantsPastEnter(GraphDef* optimized_graph,
                                             NodeDef* node) {
  if (!IsEnter(*node) || node->input_size() == 0 ||
      node->attr().count("is_constant") == 0 ||
      !node->attr().at("is_constant").b()) {
    return false;
  }
  const string& node_name = node->name();
  const NodeDef* input = node_map_->GetNode(node->input(0));
  if (input == nullptr || !IsReallyConstant(*input) ||
      OptimizedNodeExists(*input, "_enter")) {
    return false;
  }
  // Find non-constant consumers of the Enter node.
  std::vector<NodeDef*> consumers;
  for (const NodeDef* fanout : node_map_->GetOutputs(node_name)) {
    if (!IsConstant(*fanout)) {
      for (int i = 0; i < fanout->input_size(); ++i) {
        if (fanout->input(i) == node_name) {
          consumers.push_back(const_cast<NodeDef*>(fanout));
          break;
        }
      }
    }
  }
  if (consumers.empty()) {
    return false;
  }
  graph_modified_ = true;
  // Clone the constant inside the loop frame, anchored on the Enter via a
  // control dependency.
  NodeDef* new_node = optimized_graph->add_node();
  *new_node = *input;
  new_node->set_name(OptimizedNodeName(*input, "_enter"));
  new_node->set_device(node->device());
  new_node->clear_input();
  new_node->add_input(AsControlDependency(node_name));
  node_map_->AddNode(new_node->name(), new_node);
  node_map_->AddOutput(node_name, new_node->name());
  for (NodeDef* consumer : consumers) {
    for (int i = 0; i < consumer->input_size(); ++i) {
      if (NodeName(consumer->input(i)) == node_name) {
        node_map_->UpdateInput(consumer->name(), node_name, new_node->name());
        consumer->set_input(i,
                           // (continuation of MoveConstantsPastEnter:
                           // rewire the consumer to the hoisted constant)
                           new_node->name());
      }
    }
  }
  return true;
}

// For a Switch whose predicate is also its data input (Switch(p, p)), the
// false output is always `false` and the true output is always `true`.
// Materialize those as constants anchored on the respective Switch ports and
// rewire the consumers, so downstream constant folding can proceed.
bool ConstantFolding::SimplifySwitch(GraphDef* optimized_graph, NodeDef* node) {
  if (node->op() == "Switch" && node->input(0) == node->input(1) &&
      !OptimizedNodeExists(*node, "_const_false") &&
      !OptimizedNodeExists(*node, "_const_true")) {
    // Skip if both outputs already go only to terminal Identity-like nodes
    // (the shape produced by a previous run of this rewrite).
    bool already_optimized = true;
    const auto& fanouts = node_map_->GetOutputs(node->name());
    if (fanouts.size() == 2) {
      for (const NodeDef* fanout : fanouts) {
        if ((!IsIdentity(*fanout) && !IsIdentityNSingleInput(*fanout)) ||
            HasRegularOutputs(*fanout, *node_map_)) {
          already_optimized = false;
          break;
        }
      }
    }
    Tensor false_t(DT_BOOL, TensorShape({}));
    Tensor true_t(DT_BOOL, TensorShape({}));
    if (!already_optimized && SetTensorValue(DT_BOOL, true, &true_t).ok() &&
        SetTensorValue(DT_BOOL, false, &false_t).ok()) {
      // Copy the consumer list before mutating the graph.
      std::vector<NodeDef*> consumers =
          node_map_->GetOutputsOrderedByNodeName(node->name());
      NodeDef tmp_false_node;
      tmp_false_node.set_name(OptimizedNodeName(*node, "_const_false"));
      if (!CreateNodeDef(tmp_false_node.name(), TensorValue(&false_t),
                         &tmp_false_node)
               .ok()) {
        return false;
      }
      tmp_false_node.set_device(node->device());
      NodeDef tmp_true_node;
      tmp_true_node.set_name(OptimizedNodeName(*node, "_const_true"));
      if (!CreateNodeDef(tmp_true_node.name(), TensorValue(&true_t),
                         &tmp_true_node)
               .ok()) {
        return false;
      }
      tmp_true_node.set_device(node->device());

      NodeDef* false_node = optimized_graph->add_node();
      false_node->Swap(&tmp_false_node);
      NodeDef* true_node = optimized_graph->add_node();
      true_node->Swap(&tmp_true_node);

      // Anchor each constant on its Switch output port via a control
      // dependency, so it only fires when that branch is taken.
      const string false_port = node->name();
      const string true_port = strings::StrCat(node->name(), ":1");
      const string false_ctrl_dep =
          AddControlDependency(false_port, optimized_graph, node_map_.get());
      false_node->add_input(false_ctrl_dep);
      const string true_ctrl_dep =
          AddControlDependency(true_port, optimized_graph, node_map_.get());
      true_node->add_input(true_ctrl_dep);

      node_map_->AddNode(false_node->name(), false_node);
      node_map_->AddNode(true_node->name(), true_node);
      node_map_->AddOutput(NodeName(false_ctrl_dep), false_node->name());
      node_map_->AddOutput(NodeName(true_ctrl_dep), true_node->name());

      // Rewire every consumer of either port onto the matching constant.
      for (NodeDef* consumer : consumers) {
        for (int i = 0; i < consumer->input_size(); ++i) {
          const string& input = consumer->input(i);
          if (input == false_port) {
            consumer->set_input(i, false_node->name());
            node_map_->UpdateInput(consumer->name(), false_port,
                                   false_node->name());
          } else if (input == true_port) {
            consumer->set_input(i, true_node->name());
            node_map_->UpdateInput(consumer->name(), true_port,
                                   true_node->name());
          }
        }
      }
      return true;
    }
  }
  return false;
}

// True iff `node` is a reduction whose indices input is a real constant.
// On success, *indices_is_empty says whether the indices tensor has zero
// elements (i.e. the reduction reduces over no axes).
bool ConstantFolding::IsReductionWithConstantIndices(
    const NodeDef& node, bool* indices_is_empty) const {
  // Ensure its an appropriate node.
  if (!IsReduction(node) || node.input_size() < 2) {
    return false;
  }
  // Ensure that the axes to reduce by are constant.
  NodeDef* reductions_indices = node_map_->GetNode(node.input(1));
  if (!IsReallyConstant(*reductions_indices) ||
      !reductions_indices->attr().count("value")) {
    return false;
  }
  const TensorShapeProto& reduction_indices_shape =
      reductions_indices->attr().at("value").tensor().tensor_shape();
  *indices_is_empty = TensorShape(reduction_indices_shape).num_elements() == 0;
  return true;
}

// Checks that the reduction has fully known, non-negative input and output
// shapes (copied to the out-params) and reports whether both input and
// output contain exactly one element.
bool ConstantFolding::IsReductionCandidateForSimplification(
    const NodeDef& node, const GraphProperties& properties,
    TensorShapeProto* input_tensor_shape,
    TensorShapeProto* output_tensor_shape, bool* is_single_element_op) const {
  if (!properties.HasInputProperties(node.name()) ||
      !properties.HasOutputProperties(node.name())) {
    return false;
  }
  const auto& input_props = properties.GetInputProperties(node.name())[0];
  const auto& output_props = properties.GetOutputProperties(node.name())[0];
  if (!input_props.has_shape() || input_props.shape().unknown_rank() ||
      !output_props.has_shape() || output_props.shape().unknown_rank()) {
    return false;
  }
  *input_tensor_shape = input_props.shape();
  *output_tensor_shape = output_props.shape();
  // Reject any unknown (negative) dimension sizes.
  for (int i = 0; i < input_tensor_shape->dim_size(); ++i) {
    if (input_tensor_shape->dim(i).size() < 0) {
      return false;
    }
  }
  for (int i = 0; i < output_tensor_shape->dim_size(); ++i) {
    if (output_tensor_shape->dim(i).size() < 0) {
      return false;
    }
  }
  const int input_num_elements =
      TensorShape(*input_tensor_shape).num_elements();
  const int output_num_elements =
      TensorShape(*output_tensor_shape).num_elements();
  *is_single_element_op = input_num_elements == 1 && output_num_elements == 1;
  return true;
}

// A reduction is an identity if it reduces over no axes, or (with keep_dims)
// only over axes whose size is 1. Negative axis indices are normalized by
// adding the rank.
bool ConstantFolding::IsReductionSimplifiableToIdentity(
    const NodeDef& node, const TensorShapeProto& input_shape, bool keep_dims,
    const TensorVector& reduction_indices_vector) const {
  int output_size = reduction_indices_vector[0]->NumElements();
  if (output_size == 0) {
    return true;
  }

  if (!keep_dims) {
    return false;
  }
  bool simplifiable = true;
  for (int i = 0; i < output_size; ++i) {
    int64_t dim;
    if (reduction_indices_vector[0]->dtype() == DT_INT32) {
      dim = reduction_indices_vector[0]->flat<int32>()(i);
    } else {
      dim = reduction_indices_vector[0]->flat<int64_t>()(i);
    }
    if (dim < 0) {
      dim += input_shape.dim_size();
    }
    if (dim < 0 || dim >= input_shape.dim_size() ||
        input_shape.dim(dim).size() != 1) {
      simplifiable = false;
      break;
    }
  }
  return simplifiable;
}

// Rewrites a reduction into an Identity forwarding its data input; the
// indices input becomes a control dependency. The output dtype comes from
// attr "T", or DT_BOOL for Any/All (which have no "T").
bool ConstantFolding::ReplaceReductionWithIdentity(NodeDef* node) const {
  // Replace the reduction node with an identity node, that can be further
  // optimized by other passes.
  DataType output_type;
  if (node->attr().count("T") != 0) {
    output_type = node->attr().at("T").type();
  } else if (IsAny(*node) || IsAll(*node)) {
    output_type = DT_BOOL;
  } else {
    return false;
  }
  node->set_op("Identity");
  EraseRegularNodeAttributes(node);
  (*node->mutable_attr())["T"].set_type(output_type);
  *node->mutable_input(1) = AsControlDependency(node->input(1));
  return true;
}

// Simplifies reductions with constant indices: empty indices → Identity;
// single-element in/out without keep_dims → Reshape to a shape of ones;
// size-1 reduced axes with keep_dims → Identity.
// (Body continues on the next chunk line.)
bool ConstantFolding::SimplifyReduction(GraphDef* optimized_graph,
                                        const GraphProperties& properties,
                                        NodeDef* node) {
  bool indices_is_empty = false;
  if (!IsReductionWithConstantIndices(*node, &indices_is_empty)) {
    return false;
  }
  if (indices_is_empty) {
    return ReplaceReductionWithIdentity(node);
  }
  bool is_single_element_op = false;
  TensorShapeProto input_tensor_shape, output_tensor_shape;
if (!IsReductionCandidateForSimplification( *node, properties, &input_tensor_shape, &output_tensor_shape, &is_single_element_op)) { return false; } string reduction_indices_input = node->input(1); NodeDef* reduction_indices = node_map_->GetNode(reduction_indices_input); TensorVector reduction_indices_vector; auto outputs_cleanup = gtl::MakeCleanup([&reduction_indices_vector] { for (const auto& out : reduction_indices_vector) { delete out.tensor; } }); if (!EvaluateNode(*reduction_indices, TensorVector(), &reduction_indices_vector) .ok() || reduction_indices_vector.size() != 1) { return false; } bool keep_dims = node->attr().count("keep_dims") > 0 && node->attr().at("keep_dims").b(); bool simplifiable_to_reshape = is_single_element_op && !keep_dims && (node->attr().count("T") > 0); bool simplifiable_to_identity = IsReductionSimplifiableToIdentity( *node, input_tensor_shape, keep_dims, reduction_indices_vector); if (simplifiable_to_reshape) { const int new_num_dimensions = output_tensor_shape.dim_size(); Tensor tensor(DT_INT32, TensorShape({new_num_dimensions})); for (int i = 0; i < new_num_dimensions; i++) { tensor.flat<int>()(i) = 1; } TensorValue shape_value(&tensor); NodeDef* shape_node = optimized_graph->add_node(); if (!CreateNodeDef(OptimizedNodeName(*node, "_shape_const"), shape_value, shape_node) .ok()) { return false; } shape_node->set_device(node->device()); node_map_->AddNode(shape_node->name(), shape_node); shape_node->add_input(AsControlDependency(reduction_indices_input)); node_map_->AddOutput(NodeName(reduction_indices_input), shape_node->name()); node->set_op("Reshape"); node_map_->UpdateInput(node->name(), node->input(1), shape_node->name()); node->set_input(1, shape_node->name()); node->mutable_attr()->erase("keep_dims"); node->mutable_attr()->erase("Tidx"); AttrValue attr_type_indices; attr_type_indices.set_type(DT_INT32); (*node->mutable_attr())["Tshape"] = attr_type_indices; return true; } else if (simplifiable_to_identity) { return 
ReplaceReductionWithIdentity(node); } return false; } bool ConstantFolding::SimplifyReshape(const GraphProperties& properties, bool use_shape_info, NodeDef* node) { if (!use_shape_info || node->attr().count("T") == 0 || !IsSimplifiableReshape(*node, properties).ok()) { return false; } DataType output_type = node->attr().at("T").type(); node->set_op("Identity"); EraseRegularNodeAttributes(node); (*node->mutable_attr())["T"].set_type(output_type); *node->mutable_input(1) = AsControlDependency(node->input(1)); return true; } Status ConstantFolding::SimplifyArithmeticOperations( const GraphProperties& properties, bool use_shape_info, GraphDef* optimized_graph, NodeDef* node) { const bool is_mul = IsAnyMul(*node) || IsLogicalAnd(*node); const bool is_matmul = IsAnyMatMul(*node); const bool is_add = IsAdd(*node) || IsBiasAdd(*node) || IsLogicalOr(*node); const bool is_sub = IsSub(*node); const bool is_any_div = IsAnyDiv(*node) && !IsFloorDiv(*node); if (use_shape_info && (is_mul || is_matmul || is_add || is_sub || is_any_div) && properties.HasInputProperties(node->name()) && properties.HasOutputProperties(node->name())) { const NodeDef* x = node_map_->GetNode(node->input(0)); const NodeDef* y = node_map_->GetNode(node->input(1)); if (x == nullptr || y == nullptr) { return absl::InvalidArgumentError( absl::StrCat("Invalid inputs to node: ", node->DebugString())); } const TensorShapeProto& output_shape = properties.GetOutputProperties(node->name())[0].shape(); const TensorShapeProto& y_shape = properties.GetInputProperties(node->name())[1].shape(); const TensorShapeProto& x_shape = properties.GetInputProperties(node->name())[0].shape(); const bool y_matches_output_shape = ShapesSymbolicallyEqual(output_shape, y_shape); const bool x_matches_output_shape = ShapesSymbolicallyEqual(output_shape, x_shape); const bool x_is_zero = IsZeros(*x); const bool x_is_one = x_is_zero ? 
// --- continuation of ConstantFolding::SimplifyArithmeticOperations (the
// declaration `const bool x_is_one = x_is_zero ?` opens in the previous
// chunk) ---
                              false : IsOnes(*x);
    // 1 * y => y  and  0 + y => y (forward y, or broadcast when only x's
    // shape matches the output).
    if ((is_mul && x_is_one) || (is_add && x_is_zero)) {
      if (y_matches_output_shape) {
        ReplaceOperationWithSnapshot(1, properties, node, optimized_graph);
      } else if (x_matches_output_shape) {
        ReplaceBinaryOperationWithBroadcastTo(1, properties, node,
                                              optimized_graph);
      }
      return absl::OkStatus();
    }
    // 0 - y => Neg(y).
    if (y_matches_output_shape && (is_sub && x_is_zero)) {
      ReplaceSubtractionFromZeroByNegation(node, optimized_graph);
      return absl::OkStatus();
    }
    // 1 / y => Reciprocal(y), only for float/complex types.
    if (y_matches_output_shape && is_any_div && x_is_one) {
      TF_RETURN_IF_ERROR(CheckAttrExists(*node, "T"));
      DataType type = node->attr().at("T").type();
      if (DataTypeIsFloating(type) || DataTypeIsComplex(type)) {
        ReplaceDivisionOfOnesByReciprocal(node, optimized_graph);
        return absl::OkStatus();
      }
    }
    const bool y_is_zero = IsZeros(*y);
    const bool y_is_one = y_is_zero ? false : IsOnes(*y);
    // x * 1, x / 1, x + 0, x - 0 => forward x.
    if (((is_mul || is_any_div) && y_is_one) ||
        ((is_add || is_sub) && y_is_zero)) {
      if (x_matches_output_shape) {
        ReplaceOperationWithSnapshot(0, properties, node, optimized_graph);
      } else if (y_matches_output_shape) {
        ReplaceBinaryOperationWithBroadcastTo(0, properties, node,
                                              optimized_graph);
      }
      return absl::OkStatus();
    }

    // x OR true == true: fold to a constant when the output shape is fully
    // known.
    const PartialTensorShape shp(output_shape);
    if (shp.IsFullyDefined() && IsLogicalOr(*node) && (y_is_one || x_is_one)) {
      TF_RETURN_IF_ERROR(ReplaceOperationWithConstant(
          1, properties, output_shape, node, optimized_graph));
      return absl::OkStatus();
    }

    const bool is_aggressive = opt_level_ == RewriterConfig::AGGRESSIVE;
    // 0 / y is only folded aggressively: y could be zero/NaN/Inf, in which
    // case the fold would change the result.
    const bool simplify_zeros_divided_by_y =
        is_any_div && x_is_zero && is_aggressive;
    bool simplify_multiply_by_zero =
        (is_mul || is_matmul) && (x_is_zero || y_is_zero);
    if (simplify_multiply_by_zero) {
      const DataType y_dtype =
          properties.GetInputProperties(node->name())[1].dtype();
      const DataType x_dtype =
          properties.GetInputProperties(node->name())[0].dtype();
      // For float/complex, x*0 can produce NaN/Inf instead of 0, so only fold
      // in aggressive mode.
      if ((x_is_zero &&
           (DataTypeIsFloating(y_dtype) || DataTypeIsComplex(y_dtype))) ||
          (y_is_zero &&
           (DataTypeIsFloating(x_dtype) || DataTypeIsComplex(x_dtype)))) {
        simplify_multiply_by_zero = is_aggressive;
      }
    }

    if (simplify_multiply_by_zero || simplify_zeros_divided_by_y) {
      if (shp.IsFullyDefined()) {
        // Full fold to a zero constant.  A folded QuantizedMatMul also needs
        // its min/max output ports materialized as constants.
        bool is_quantized = IsQuantizedMatMul(*node);
        TF_RETURN_IF_ERROR(ReplaceOperationWithConstant(
            0, properties, output_shape, node, optimized_graph));
        if (is_quantized && graph_modified_) {
          TF_RETURN_IF_ERROR(
              AddQuantizedMatMulMinMaxOutConstNodes(node, optimized_graph));
        }
        return absl::OkStatus();
      }
      // Output shape not fully known: forward the zero operand instead
      // (0*y => 0, 0/y => 0, x*0 => 0) when shapes line up.
      if ((is_mul || is_any_div) && x_is_zero) {
        if (x_matches_output_shape) {
          ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
        } else if (y_matches_output_shape) {
          ReplaceBinaryOperationWithBroadcastTo(0, properties, node,
                                                optimized_graph);
        }
        return absl::OkStatus();
      } else if (is_mul && y_is_zero) {
        if (y_matches_output_shape) {
          ReplaceOperationWithIdentity(1, properties, node, optimized_graph);
        } else if (x_matches_output_shape) {
          ReplaceBinaryOperationWithBroadcastTo(1, properties, node,
                                                optimized_graph);
        }
        return absl::OkStatus();
      }
    }
  }
  return absl::OkStatus();
}

// Strength-reduces division by a constant (Div/RealDiv/Xdivy) into a multiply
// by Reciprocal(const).  (Closes in the following chunk.)
bool ConstantFolding::ReduceDivToReciprocalMul(GraphDef* optimized_graph,
                                               NodeDef* node) {
  if (node->input_size() >= 2 &&
      (IsDiv(*node) || IsRealDiv(*node) || IsXdivy(*node))) {
    const string& const_input = node->input(1);
    const NodeDef* denom = node_map_->GetNode(const_input);
    CHECK(denom != nullptr);
    if (!IsReallyConstant(*denom)) {
      return false;
    }
    if (node->attr().count("T") == 0) {
      return false;
    }
    DataType type = node->attr().at("T").type();
    // Skip integer Div: truncating division is not preserved by
    // multiply-by-reciprocal.
    if (IsDiv(*node) &&
        !(DataTypeIsFloating(type) || DataTypeIsComplex(type))) {
      return false;
    }
    // Insert Reciprocal(const) and rewire this node to multiply by it.
    NodeDef* reciprocal_node = optimized_graph->add_node();
    reciprocal_node->set_name(OptimizedNodeName(*node, "_recip"));
    reciprocal_node->set_op("Reciprocal");
    reciprocal_node->set_device(node->device());
    reciprocal_node->add_input(const_input);
    (*reciprocal_node->mutable_attr())["T"].set_type(type);
    if (IsXdivy(*node)) {
      // Xdivy(x, y) is 0 when x == 0; MulNoNan(recip(y), x) preserves that.
      node->set_op("MulNoNan");
      node->set_input(1, node->input(0));
// --- tail of ConstantFolding::ReduceDivToReciprocalMul ---
      node->set_input(0, reciprocal_node->name());
    } else {
      node->set_op("Mul");
      node->set_input(1, reciprocal_node->name());
    }
    node_map_->AddNode(reciprocal_node->name(), reciprocal_node);
    node_map_->UpdateOutput(node->name(), const_input,
                            reciprocal_node->name());
    return true;
  }
  return false;
}

// Checks that `parent` is a valid candidate for pushing its constant operand
// down past its non-constant child op, and fills `ctx` with the
// classification of children and leaves used by the push-down rewrites.
// Returns false (rejecting the rewrite) on any structural mismatch.
bool ConstantFolding::PrepareConstantPushDown(
    const NodeDef& parent, const GraphProperties& properties,
    bool must_have_properties, ConstantPushDownContext* ctx) const {
  if (ctx == nullptr || !has_fetch_ || NumNonControlInputs(parent) != 2) {
    return false;
  }
  NodeDef* left_child = node_map_->GetNode(parent.input(0));
  NodeDef* right_child = node_map_->GetNode(parent.input(1));
  if (left_child == nullptr || right_child == nullptr) {
    return false;
  }
  ctx->left_child_is_const = IsReallyConstant(*left_child);
  ctx->right_child_is_const = IsReallyConstant(*right_child);
  ctx->op_child = ctx->left_child_is_const ? right_child : left_child;
  ctx->const_child = ctx->left_child_is_const ? left_child : right_child;
  // At least one side must be constant for a push-down to make sense.
  if (!ctx->left_child_is_const && !ctx->right_child_is_const) {
    return false;
  }
  // Don't move nodes across devices.
  if (parent.device() != ctx->op_child->device() ||
      parent.device() != ctx->const_child->device()) {
    return false;
  }
  // The non-const child must be rewritable: binary, not in the preserve set,
  // and with a single consumer — otherwise the rewrite would change the value
  // observed by its other consumers.
  if (ctx->op_child->input_size() < 2 ||
      nodes_to_preserve_.find(ctx->op_child->name()) !=
          nodes_to_preserve_.end() ||
      NumNonControlOutputs(*ctx->op_child, *node_map_) > 1) {
    return false;
  }

  if (!CheckAttrExists(parent, "T").ok()) return false;
  DataType dtype = parent.attr().at("T").type();
  // NOTE(review): bfloat16/half are excluded — presumably because
  // reassociation error is too large at low precision; confirm before
  // relaxing.
  if (dtype == DT_BFLOAT16 || dtype == DT_HALF) {
    return false;
  }

  // If the const child also consumes the op child's output, swapping inputs
  // would create a cycle.
  const auto& child_output = node_map_->GetOutputs(ctx->op_child->name());
  if (child_output.find(ctx->const_child) != child_output.end()) {
    return false;
  }

  ctx->left_leaf = node_map_->GetNode(ctx->op_child->input(0));
  ctx->right_leaf = node_map_->GetNode(ctx->op_child->input(1));
  ctx->left_leaf_is_const = IsReallyConstant(*ctx->left_leaf);
  ctx->right_leaf_is_const = IsReallyConstant(*ctx->right_leaf);
  // If both leaves are constant, plain constant folding will handle it.
  if (ctx->left_leaf_is_const && ctx->right_leaf_is_const) {
    return false;
  }
  if (parent.device() != ctx->left_leaf->device() ||
      parent.device() != ctx->right_leaf->device()) {
    return false;
  }

  ctx->parent_input_props = &properties.GetInputProperties(parent.name());
  ctx->op_child_input_props =
      &properties.GetInputProperties(ctx->op_child->name());
  if (must_have_properties &&
      (ctx->parent_input_props == nullptr ||
       ctx->parent_input_props->size() < 2 ||
       ctx->op_child_input_props == nullptr ||
       ctx->op_child_input_props->size() < 2)) {
    return false;
  }
  VLOG(1) << "\n++++++++ PushDown for node " << parent.name() << ": "
          << parent.op() << "(" << left_child->op() << ", "
          << right_child->op() << ")";
  return true;
}

// Pushes a constant operand of an Add/BiasAdd down past a child Add/BiasAdd
// so the two constants end up adjacent and foldable.
// (Closes in the following chunk.)
bool ConstantFolding::ConstantPushDownBiasAdd(GraphProperties* properties,
                                              GraphDef* optimized_graph,
                                              NodeDef* node) {
  const bool parent_is_bias_add = IsBiasAdd(*node);
  if (!parent_is_bias_add && !IsAdd(*node)) return false;
  ConstantPushDownContext ctx;
  if (!PrepareConstantPushDown(*node, *properties, true, &ctx)) {
    return false;
  }
  // BiasAdd requires the bias (constant) to be the second input.
  if (ctx.left_child_is_const && parent_is_bias_add) return false;
  const bool child_is_bias_add = IsBiasAdd(*ctx.op_child);
  if (!child_is_bias_add && !IsAdd(*ctx.op_child)) return false;

  // All four input shapes must have known rank so the vector/matrix roles of
  // the child's operands can be identified.
  if (ctx.parent_input_props->empty() || ctx.op_child_input_props->empty() ||
      (*ctx.parent_input_props)[0].shape().unknown_rank() ||
      (*ctx.parent_input_props)[1].shape().unknown_rank() ||
      (*ctx.op_child_input_props)[0].shape().unknown_rank() ||
      (*ctx.op_child_input_props)[1].shape().unknown_rank()) {
    return false;
  }

  // Exactly one child leaf must be rank-1: that is the "bias vector" operand.
  const int left_leaf_rank = (*ctx.op_child_input_props)[0].shape().dim_size();
  const int right_leaf_rank =
      (*ctx.op_child_input_props)[1].shape().dim_size();
  if (left_leaf_rank != 1 && right_leaf_rank != 1) return false;
  const int vector_idx = left_leaf_rank == 1 ? 0 : 1;
  const int matrix_idx = 1 - vector_idx;

  const auto& vector_prop = (*ctx.op_child_input_props)[vector_idx];
  const int vector_rank = vector_idx == 0 ?
// --- tail of ConstantFolding::ConstantPushDownBiasAdd (the declaration
// `const int vector_rank = vector_idx == 0 ?` opens in the previous chunk) ---
                              left_leaf_rank : right_leaf_rank;
  if (vector_rank != 1) return false;
  const DataType vector_type = vector_prop.dtype();

  const auto& matrix_prop = (*ctx.op_child_input_props)[matrix_idx];
  const int matrix_rank = matrix_prop.shape().dim_size();
  const DataType matrix_type = matrix_prop.dtype();

  const int const_idx = ctx.left_child_is_const ? 0 : 1;
  const auto& const_prop = (*ctx.parent_input_props)[const_idx];
  const int const_rank = const_prop.shape().dim_size();
  const DataType const_type = const_prop.dtype();

  // Decide which child leaf the parent's constant may trade places with: a
  // matrix-shaped constant swaps with the matrix leaf (Add-over-BiasAdd
  // only), a rank-1 constant swaps with the vector leaf.
  int input_to_swap = -1;
  if (!parent_is_bias_add && child_is_bias_add && const_rank == matrix_rank &&
      const_type == matrix_type) {
    input_to_swap = matrix_idx;
  } else if (const_rank == 1 && const_type == vector_type) {
    input_to_swap = vector_idx;
  }
  if (input_to_swap == -1) return false;
  const NodeDef* leaf_to_swap =
      node_map_->GetNode(ctx.op_child->input(input_to_swap));
  // Swapping two constants would accomplish nothing.
  if (IsConstant(*leaf_to_swap)) return false;

  // Rewire the node map first, then physically swap the two input strings.
  node_map_->UpdateInput(node->name(), node->input(const_idx),
                         ctx.op_child->input(input_to_swap));
  node_map_->AddOutput(node->input(const_idx), ctx.op_child->name());
  if (ctx.op_child->input(input_to_swap) !=
      ctx.op_child->input(1 - input_to_swap)) {
    node_map_->RemoveOutput(ctx.op_child->input(input_to_swap),
                            ctx.op_child->name());
  }
  std::swap(*node->mutable_input(const_idx),
            *ctx.op_child->mutable_input(input_to_swap));
  // The input shapes of both nodes changed; invalidate cached properties.
  properties->ClearInputProperties(node->name());
  properties->ClearInputProperties(ctx.op_child->name());
  return true;
}

// Pushes a constant operand of +,-,*,/ down past a child op of the same
// family ({Add,Sub} or {Mul,Div}) so that two constants become adjacent and
// foldable, adjusting ops as needed to keep the value unchanged.
// (Closes in the following chunk.)
bool ConstantFolding::ConstantPushDown(GraphProperties* properties,
                                       GraphDef* optimized_graph,
                                       NodeDef* node) {
  const bool is_add = IsAdd(*node);
  const bool is_mul = IsMul(*node);
  const bool is_sub = IsSub(*node);
  const bool is_div = IsDiv(*node);
  if (!(is_add || is_sub || is_mul || is_div)) return false;
  const bool is_symmetric = is_add || is_mul;

  ConstantPushDownContext ctx;
  if (!PrepareConstantPushDown(*node, *properties, false, &ctx)) {
    return false;
  }

  // Parent and child must belong to the same op family for reassociation.
  const bool is_child_add = IsAdd(*ctx.op_child);
  const bool is_child_mul = IsMul(*ctx.op_child);
  const bool is_child_sub = IsSub(*ctx.op_child);
  const bool is_child_div = IsDiv(*ctx.op_child);
  const bool is_add_sub =
      (is_add || is_sub) && (is_child_add || is_child_sub);
  const bool is_mul_div =
      (is_mul || is_div) && (is_child_mul || is_child_div);
  if (!is_add_sub && !is_mul_div) {
    return false;
  }
  const bool is_child_symmetric = is_child_add || is_child_mul;

  if (!CheckAttrExists(*node, "T").ok()) return false;
  DataType dtype = node->attr().at("T").type();
  // Reassociation involving Sub/Div is only value-preserving for
  // float/complex arithmetic; integer semantics would change.
  if (!(is_symmetric && is_child_symmetric) &&
      !(DataTypeIsFloating(dtype) || DataTypeIsComplex(dtype))) {
    return false;
  }

  const NodeDef* y_node =
      ctx.left_leaf_is_const ? ctx.left_leaf : ctx.right_leaf;
  if (!IsReallyConstant(*y_node) && !ctx.parent_input_props->empty() &&
      !ctx.op_child_input_props->empty()) {
    // Don't push a constant whose shape is larger than the leaf it would
    // displace: folding could then materialize a bigger constant.
    const PartialTensorShape c_shape(
        (*ctx.parent_input_props)[ctx.left_child_is_const ? 0 : 1].shape());
    const PartialTensorShape x_shape(
        (*ctx.op_child_input_props)[ctx.left_leaf_is_const ? 0 : 1].shape());

    if (c_shape.IsFullyDefined() && x_shape.IsFullyDefined() &&
        c_shape.num_elements() > x_shape.num_elements()) {
      return false;
    } else if (!c_shape.unknown_rank() && !x_shape.unknown_rank() &&
               c_shape.dims() > 0) {
      for (int idx = 0; idx < std::min(x_shape.dims(), c_shape.dims());
           ++idx) {
        if (x_shape.dim_size(idx) >= 0 &&
            c_shape.dim_size(idx) > x_shape.dim_size(idx)) {
          return false;
        }
      }
    }
  }

  // Terms of the expression: C is the parent's constant, X the non-const
  // child leaf, Y the other child leaf, input_op the non-const child itself.
  const string input_x =
      ctx.left_leaf_is_const ? ctx.op_child->input(1) : ctx.op_child->input(0);
  const string input_y = input_x == ctx.op_child->input(0)
                             ? ctx.op_child->input(1)
                             : ctx.op_child->input(0);
  const string input_c =
      ctx.left_child_is_const ? node->input(0) : node->input(1);
  const string input_op =
      ctx.left_child_is_const ?
// --- tail of ConstantFolding::ConstantPushDown ---
                              input_c : input_y);
  }
  return true;
}

// Pushes a constant multiplier past a Conv2D/Conv3D so it can be folded into
// the convolution's constant input: Mul(C2, Conv(X, C1)) becomes
// Conv(X, Mul(C1, C2)).  (Closes in the following chunk.)
bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node,
                                      const GraphProperties& properties) {
  if (!IsAnyMul(*node) || NumNonControlInputs(*node) != 2) return false;

  NodeDef* mul_left_child = node_map_->GetNode(node->input(0));
  NodeDef* mul_right_child = node_map_->GetNode(node->input(1));
  if (mul_left_child == nullptr || mul_right_child == nullptr) {
    return false;
  }
  // One Mul operand must be constant and the other a Conv2D/Conv3D.
  const bool left_child_is_constant = IsReallyConstant(*mul_left_child);
  const bool right_child_is_constant = IsReallyConstant(*mul_right_child);
  if (!left_child_is_constant && !right_child_is_constant) {
    return false;
  }
  NodeDef* conv_node =
      left_child_is_constant ? mul_right_child : mul_left_child;
  if (!IsConv2D(*conv_node) && !IsConv3D(*conv_node)) {
    return false;
  }
  // Don't rewrite across devices.
  if (node->device() != mul_left_child->device() ||
      node->device() != mul_right_child->device()) {
    return false;
  }
  // The conv must be binary, exclusively consumed by this Mul, and not in the
  // preserve set.
  if (conv_node->input_size() < 2 ||
      NumNonControlOutputs(*conv_node, *node_map_) > 1 ||
      nodes_to_preserve_.find(conv_node->name()) !=
          nodes_to_preserve_.end()) {
    return false;
  }

  // Exactly one conv input must be constant (both constant is left to plain
  // constant folding).
  NodeDef* conv_left_child = node_map_->GetNode(conv_node->input(0));
  NodeDef* conv_right_child = node_map_->GetNode(conv_node->input(1));
  const bool conv_left_is_constant = IsReallyConstant(*conv_left_child);
  const bool conv_right_is_constant = IsReallyConstant(*conv_right_child);
  if (!conv_left_is_constant && !conv_right_is_constant) {
    return false;
  }
  if (conv_left_is_constant && conv_right_is_constant) {
    return false;
  }

  // The rewrite only preserves values when the Mul does not broadcast the
  // conv output: both output shapes must agree.
  const auto& mul_props = properties.GetOutputProperties(node->name());
  const auto& conv_props = properties.GetOutputProperties(conv_node->name());
  if (mul_props.empty() || conv_props.empty()) {
    return false;
  }
  const auto& mul_shape = mul_props[0].shape();
  const auto& conv_shape = conv_props[0].shape();
  if (!ShapesSymbolicallyEqual(mul_shape, conv_shape)) {
    return false;
  }

  const auto& input_props = properties.GetInputProperties(conv_node->name());
  if (input_props.size() < 2) {
    return false;
  }
  const auto& filter_shape = input_props[1].shape();

  NodeDef* const_node =
      left_child_is_constant ? mul_left_child : mul_right_child;
  const auto& const_props = properties.GetOutputProperties(const_node->name());
  if (const_props.empty()) {
    return false;
  }
  const auto& const_shape = const_props[0].shape();
  // The multiplier's shape must be compatible with scaling the filter given
  // the conv's data format.
  if (!IsValidConstShapeForMulConvPushDown(
          conv_node->attr().at("data_format").s(), filter_shape,
          const_shape)) {
    return false;
  }

  string mul_new_name = AddPrefixToNodeName("merged_input", conv_node->name());
  if (node_map_->NodeExists(mul_new_name)) {
    return false;
  }
  string conv_const_input =
      conv_left_is_constant ? conv_node->input(0) : conv_node->input(1);
  if (MaybeRemoveControlInput(conv_node->name(), const_node, optimized_graph,
                              node_map_.get())) {
    // Re-anchor the removed control dependency on the conv's constant input
    // so ordering constraints on const_node are not lost.
    MaybeAddControlInput(conv_const_input, const_node, optimized_graph,
                         node_map_.get());
  }

  // Swap names: the conv takes over the Mul's name (keeping downstream
  // consumers intact) and the Mul is renamed to scale the conv's constant.
  conv_node->set_name(node->name());
  node->set_name(mul_new_name);
  if (conv_left_is_constant) {
    node_map_->UpdateInput(conv_node->name(), node->input(0), mul_new_name);
    conv_node->set_input(0, mul_new_name);
  } else {
    node_map_->UpdateInput(conv_node->name(), node->input(1), mul_new_name);
    conv_node->set_input(1, mul_new_name);
  }
  NodeDef* conv_const_node =
      conv_left_is_constant ? conv_left_child : conv_right_child;
  if (left_child_is_constant) {
    node->set_input(1, conv_const_node->name());
  } else {
    node->set_input(0, conv_const_node->name());
  }
  node_map_->AddNode(mul_new_name, node);
  return true;
}

// Forwards any constant inputs of an IdentityN directly to their consumers.
bool ConstantFolding::PartialConstPropThroughIdentityN(NodeDef* node) {
  if (!(IsIdentityN(*node) || IsIdentityNSingleInput(*node)) ||
      !HasRegularInputs(*node))
    return false;

  std::vector<int> inputs_to_forward;
  for (int input_idx = 0; input_idx < node->input_size(); ++input_idx) {
    const string& input = node->input(input_idx);
    if (IsControlInput(input)) {
      return false;
    }
    const NodeDef* input_node = node_map_->GetNode(NodeName(input));
    if (input_node == nullptr) {
      LOG(ERROR) << "Bad input: " << input;
      return false;
    }
    if (IsReallyConstant(*input_node)) {
      inputs_to_forward.push_back(input_idx);
    }
  }
  return ForwardInputs(node, inputs_to_forward);
}

// For a commutative aggregate (AddN/AccumulateNV2) with more than two inputs,
// splits the constant inputs off into a separate AddN that plain constant
// folding can then collapse.  (Closes in the following chunk.)
bool ConstantFolding::PartialAssocOpConstFolding(GraphDef* optimized_graph,
                                                 GraphProperties* properties,
                                                 NodeDef* node) {
  if (!IsAggregate(*node) || !IsCommutative(*node)) return false;

  const int num_non_control_inputs = NumNonControlInputs(*node);
  if (num_non_control_inputs <= 2) return false;
  const int num_control_inputs = node->input_size() - num_non_control_inputs;
  // Partition the inputs into constant and non-constant (incl. control).
  std::vector<int> const_inputs;
  std::vector<int> nonconst_inputs;
  for (int i = 0; i < node->input_size(); ++i) {
    const string& input = node->input(i);
    const NodeDef* input_node = node_map_->GetNode(NodeName(input));
    if (input_node == nullptr) return false;
    if (!IsControlInput(input) && IsReallyConstant(*input_node)) {
      const_inputs.push_back(i);
    } else {
      nonconst_inputs.push_back(i);
    }
  }
  int const_inputs_size = const_inputs.size();
  if (const_inputs_size == num_non_control_inputs &&
      node->op() == "AccumulateNV2") {
    // All inputs are constants: turn the op into AddN so plain constant
    // folding can fold it entirely.
    node->set_op("AddN");
    node->mutable_attr()->erase("shape");
    return true;
  }
  const string new_node_name = OptimizedNodeName(
      *node, strings::StrCat("_partial_split_", const_inputs_size));
  if (const_inputs_size > 1 && const_inputs_size <
// --- tail of ConstantFolding::PartialAssocOpConstFolding (the condition
// `if (const_inputs_size > 1 && const_inputs_size <` opens in the previous
// chunk) ---
          num_non_control_inputs &&
      !node_map_->NodeExists(new_node_name)) {
    // Hoist the constant inputs into a new AddN node that plain constant
    // folding can collapse; the original node keeps the non-constant inputs
    // plus the new AddN.
    NodeDef* added_node = optimized_graph->add_node();
    *added_node = *node;
    added_node->set_op("AddN");
    added_node->mutable_attr()->erase("shape");
    added_node->set_name(new_node_name);
    node_map_->AddNode(added_node->name(), added_node);
    added_node->clear_input();
    for (int i : const_inputs) {
      added_node->add_input(node->input(i));
      node_map_->UpdateOutput(NodeName(node->input(i)), node->name(),
                              added_node->name());
    }

    // Overwrite the first constant input slot with the new node, then compact
    // the surviving (non-const + control) inputs to the front and trim the
    // leftover slots.
    node->set_input(const_inputs[0], added_node->name());
    node_map_->AddOutput(added_node->name(), node->name());
    nonconst_inputs.push_back(const_inputs[0]);
    std::sort(nonconst_inputs.begin(), nonconst_inputs.end());
    int idx = 0;
    for (int i : nonconst_inputs) {
      if (idx != i) {
        node->set_input(idx, node->input(i));
      }
      ++idx;
    }
    node->mutable_input()->DeleteSubrange(nonconst_inputs.size(),
                                          const_inputs.size() - 1);
    // "N" counts only the regular (non-control) inputs.
    (*node->mutable_attr())["N"].set_i(node->input_size() -
                                       num_control_inputs);
    properties->ClearInputProperties(node->name());
    (*added_node->mutable_attr())["N"].set_i(const_inputs.size());
    return true;
  }
  return false;
}

// Splits runs of >= 2 consecutive constant inputs of a Concat/ConcatV2 into
// child ConcatV2 nodes that plain constant folding can collapse.
// (Closes in the following chunk.)
bool ConstantFolding::PartialConcatConstFolding(GraphDef* optimized_graph,
                                                GraphProperties* properties,
                                                NodeDef* node) {
  // Skip nodes this pass created itself (marked by the name suffix).
  if (!IsConcat(*node) ||
      node->name().rfind("_partial_split_") != string::npos) {
    return false;
  }
  const int num_non_control_inputs = NumNonControlInputs(*node);
  if (num_non_control_inputs <= 3) return false;

  // [begin, end) brackets the data inputs; axis_arg is the position of the
  // axis input (first for Concat, last for ConcatV2).
  int axis_arg = -1;
  int begin = 0;
  int end = num_non_control_inputs;
  if (node->op() == "Concat") {
    begin = 1;
    axis_arg = 0;
  } else if (node->op() == "ConcatV2") {
    end = num_non_control_inputs - 1;
    axis_arg = num_non_control_inputs - 1;
  } else {
    return false;
  }

  // Collect the maximal runs [first, last) of consecutive constant inputs.
  std::vector<std::pair<int, int>> constant_input_runs;
  int first = begin;
  int last = begin;
  while (last < end) {
    while (first < end && !IsReallyConstant(*node_map_->GetNode(
                              NodeName(node->input(first))))) {
      ++first;
    }
    last = first + 1;
    while (last < end && IsReallyConstant(*node_map_->GetNode(
                             NodeName(node->input(last))))) {
      ++last;
    }
    // Only runs of at least two constants are worth splitting off.
    if (first < end && (last - first) > 1) {
      constant_input_runs.emplace_back(first, last);
    }
    first = last;
  }

  // Nothing to do if there are no runs, or a single run already spans all
  // data inputs (plain constant folding will handle that case).
  if (constant_input_runs.empty() || (constant_input_runs.size() == 1 &&
                                      constant_input_runs[0].first == begin &&
                                      constant_input_runs[0].second == end)) {
    return false;
  }
  std::set<int> inputs_to_delete;
  for (auto interval : constant_input_runs) {
    // Push the constants of each run into a new child ConcatV2 node; extend
    // the name until it is unique.
    string new_node_name = OptimizedNodeName(*node, "_partial_split");
    do {
      new_node_name += strings::StrCat("_", interval.first);
    } while (node_map_->NodeExists(new_node_name));

    NodeDef* added_node = optimized_graph->add_node();
    *added_node = *node;
    added_node->set_op("ConcatV2");
    added_node->set_name(new_node_name);
    node_map_->AddNode(added_node->name(), added_node);
    added_node->clear_input();
    for (int i = interval.first; i < interval.second; ++i) {
      added_node->add_input(node->input(i));
      node_map_->UpdateInput(node->name(), node->input(i),
                             added_node->name());
      if (i != interval.first) {
        inputs_to_delete.insert(i);
      }
    }
    added_node->add_input(node->input(axis_arg));
    (*added_node->mutable_attr())["N"].set_i(interval.second -
                                             interval.first);
    node_map_->AddOutput(NodeName(node->input(axis_arg)),
                         added_node->name());
    // The first slot of the run now feeds from the new child concat.
    node->set_input(interval.first, added_node->name());
  }
  if (!inputs_to_delete.empty()) {
    // Rebuild the input list of the original node without the folded slots.
    protobuf::RepeatedPtrField<string> tmp;
    tmp.Swap(node->mutable_input());
    for (int i = 0; i < tmp.size(); ++i) {
      if (inputs_to_delete.find(i) == inputs_to_delete.end()) {
        node->add_input(tmp.Get(i));
      }
    }
    (*node->mutable_attr())["N"].set_i(node->input_size() - 1);
    properties->ClearInputProperties(node->name());
  }
  return true;
}

// Extracts the constant axis of a ConcatV2 node into *axis; returns false if
// the node is not a ConcatV2 with a constant axis.
// (Closes in the following chunk.)
bool ConstantFolding::GetConcatAxis(const NodeDef& node, int* axis) {
  if (node.op() != "ConcatV2") {
    return false;
  }
  // The axis is the last regular input; skip trailing control inputs.
  int axis_idx = node.input_size() - 1;
  while (axis_idx > 0 && IsControlInput(node.input(axis_idx))) {
    --axis_idx;
  }
  if (axis_idx <= 0) {
    return false;
  }
  Tensor axis_tensor;
  if
// --- tail of ConstantFolding::GetConcatAxis (the `if` opens in the previous
// chunk) ---
      (!GetTensorFromConstNode(node.input(axis_idx), &axis_tensor)) {
    return false;
  }
  *axis = axis_tensor.dtype() == DT_INT64
              ? static_cast<int>(axis_tensor.scalar<int64_t>()())
              : axis_tensor.scalar<int32>()();
  return true;
}

// Merges a ConcatV2 into its single consumer when that consumer is a ConcatV2
// along the same axis, splicing this node's inputs into the parent.
// (Closes in the following chunk.)
bool ConstantFolding::MergeConcat(bool use_shape_info,
                                  GraphProperties* properties,
                                  GraphDef* optimized_graph, NodeDef* node) {
  // Only merge when this concat has exactly one consumer and is not in the
  // preserve set.
  int axis;
  if (!use_shape_info || !GetConcatAxis(*node, &axis) ||
      nodes_to_preserve_.find(node->name()) != nodes_to_preserve_.end() ||
      node_map_->GetOutputs(node->name()).size() != 1) {
    return false;
  }

  // If all data inputs are constant, plain constant folding will fold the
  // whole concat; merging here would get in its way.
  const int num_regular_inputs = NumNonControlInputs(*node);
  bool all_inputs_are_const = true;
  for (int i = 0; i < num_regular_inputs - 1; ++i) {
    const NodeDef* input_node = node_map_->GetNode(node->input(i));
    if (!IsReallyConstant(*input_node)) {
      all_inputs_are_const = false;
      break;
    }
  }
  if (all_inputs_are_const) return false;

  NodeDef* parent = *node_map_->GetOutputs(node->name()).begin();
  int parent_axis;
  if (!GetConcatAxis(*parent, &parent_axis) || axis != parent_axis) {
    return false;
  }

  // Don't merge if the parent's inputs come from more than one task — the
  // nested concat is presumably deliberate to reduce cross-task traffic.
  string task, device;
  absl::flat_hash_set<string> unique_input_tasks;
  const int n_parent_inputs = NumNonControlInputs(*parent);
  for (int i = 0; i < n_parent_inputs - 1; ++i) {
    const NodeDef* input_node = node_map_->GetNode(parent->input(i));
    if (!input_node->device().empty() &&
        tensorflow::DeviceNameUtils::SplitDeviceName(input_node->device(),
                                                     &task, &device)) {
      unique_input_tasks.insert(task);
      if (unique_input_tasks.size() >= 2) {
        return false;
      }
    }
  }

  // Splice this concat's data inputs into the parent in place of the edge
  // that referenced this node.
  protobuf::RepeatedPtrField<string> parent_inputs;
  parent_inputs.Swap(parent->mutable_input());
  for (const auto& input : parent_inputs) {
    if (IsSameInput(input, node->name())) {
      for (int j = 0; j < num_regular_inputs - 1; ++j) {
        parent->add_input(node->input(j));
        node_map_->UpdateInput(parent->name(), node->name(), node->input(j));
      }
    } else {
      parent->add_input(input);
    }
  }
  // Forward this node's control inputs to the parent as well.
  const int num_inputs = node->input_size();
  for (int i = num_inputs - 1; i >= num_regular_inputs; --i) {
    parent->add_input(node->input(i));
    node_map_->UpdateInput(parent->name(), node->name(), node->input(i));
    node->mutable_input()->RemoveLast();
  }
  (*parent->mutable_attr())["N"].set_i(NumNonControlInputs(*parent) - 1);
  DedupControlInputs(parent);
  ReplaceOperationWithNoOp(node, properties, optimized_graph);

  return true;
}

// Materializes the min/max scalar outputs (ports 1 and 2) of a folded
// QuantizedMatMul as Const nodes holding the quantized type's extremes, and
// rewires their consumers.  (Closes in the following chunk.)
Status ConstantFolding::AddQuantizedMatMulMinMaxOutConstNodes(
    NodeDef* node, GraphDef* optimized_graph) {
  auto add_quantized_out = [this, node, optimized_graph](
                               const string& out_const_name, int index) {
    NodeDef* out_node = optimized_graph->add_node();
    graph_modified_ = true;
    Tensor value(DT_FLOAT, TensorShape({}));
    const bool is_min = index == 1;
    const DataType type_attr = node->attr().at("dtype").type();

    value.flat<float>()(0) = is_min ? QuantizedTypeMinAsFloat(type_attr)
                                    : QuantizedTypeMaxAsFloat(type_attr);
    TF_RETURN_IF_ERROR(
        CreateNodeDef(out_const_name, TensorValue(&value), out_node));
    node_map_->AddNode(out_const_name, out_node);
    out_node->set_device(node->device());
    // Mirror the original node's inputs so its dependencies are preserved.
    // NOTE(review): inputs are copied verbatim; presumably they are already
    // control dependencies on a folded node — confirm.
    out_node->mutable_input()->CopyFrom(node->input());
    for (const string& input : out_node->input()) {
      node_map_->AddOutput(NodeName(input), out_const_name);
    }

    // Point all consumers of the min/max output port at the new constant,
    // and drop the node-map edge when a consumer no longer reads this node.
    string old_input = absl::StrCat(node->name(), ":", index);
    int old_node_count = 0;
    auto outputs = node_map_->GetOutputs(node->name());
    for (const auto& output : outputs) {
      for (int i = 0; i < output->input_size(); ++i) {
        if (output->input(i) == old_input) {
          output->set_input(i, out_const_name);
          node_map_->AddOutput(out_const_name, output->name());
        } else if (NodeName(output->input(i)) == node->name()) {
          ++old_node_count;
        }
      }
      if (old_node_count == 0) {
        node_map_->RemoveOutput(node->name(), output->name());
      }
    }

    return absl::OkStatus();
  };
  const string min_out_const_name =
      OptimizedNodeName(*node, "-quantized_matmul_min_out");
  const string max_out_const_name =
      OptimizedNodeName(*node, "-quantized_matmul_max_out");
  if (node_map_->GetNode(min_out_const_name) == nullptr &&
      node_map_->GetNode(max_out_const_name) == nullptr) {
// --- tail of ConstantFolding::AddQuantizedMatMulMinMaxOutConstNodes ---
    TF_RETURN_IF_ERROR(add_quantized_out(min_out_const_name, 1));
    TF_RETURN_IF_ERROR(add_quantized_out(max_out_const_name, 2));
  } else {
    return absl::InternalError(absl::Substitute(
        "Can't create Const for QuantizedMatMul min_out/max_out of "
        "node '$0' because of node name conflict",
        node->name()));
  }
  return absl::OkStatus();
}

// Runs one pass of shape/constant materialization, folding, and graph
// simplification over item->graph, writing the result to optimized_graph.
Status ConstantFolding::RunOptimizationPass(Cluster* cluster,
                                            GrapplerItem* item,
                                            GraphProperties* properties,
                                            GraphDef* optimized_graph) {
  optimized_graph->Clear();
  graph_ = &item->graph;
  node_map_.reset(new NodeMap(graph_));
  nodes_allowlist_.clear();
  // A fetch node is allowed to be folded only when it has a single output:
  // it is then rewritten as a constant under the same name and remains
  // fetchable.
  for (const auto& fetch : item->fetch) {
    const NodeDef* fetch_node = node_map_->GetNode(fetch);
    if (fetch_node && NumOutputs(*fetch_node, graph_) == 1) {
      nodes_allowlist_.insert(fetch_node->name());
    }
  }

  absl::flat_hash_set<string> nodes_to_not_simplify;
  if (properties->has_properties()) {
    TF_RETURN_IF_ERROR(MaterializeShapes(*properties));
    TF_RETURN_IF_ERROR(MaterializeConstants(*properties));
    TF_RETURN_IF_ERROR(
        FoldGraph(*properties, optimized_graph, &nodes_to_not_simplify));
  } else {
    // No inferred shapes: skip folding, copy the graph through.
    *optimized_graph = *graph_;
  }
  node_map_.reset(new NodeMap(optimized_graph));

  TF_RETURN_IF_ERROR(
      SimplifyGraph(optimized_graph, properties, &nodes_to_not_simplify));

  return absl::OkStatus();
}

// Grappler entry point: repeatedly runs optimization passes until the graph
// reaches a fixed point (no modifications, stable node count), then restores
// the function library and version metadata from the input item.
Status ConstantFolding::Optimize(Cluster* cluster, const GrapplerItem& item,
                                 GraphDef* optimized_graph) {
  // Evaluate constants in the same float environment the runtime uses:
  // denormals flushed to zero, round-to-nearest.
  port::ScopedFlushDenormal flush;
  port::ScopedSetRound round(FE_TONEAREST);
  nodes_to_preserve_ = item.NodesToPreserve();
  for (const auto& feed : item.feed) {
    feed_nodes_.insert(NodeName(feed.first));
  }

  if (cpu_device_ == nullptr) {
    owned_device_.reset(new DeviceSimple());
    cpu_device_ = owned_device_.get();
  }

  graph_contains_assign_or_inplace_op_ = false;
  for (const NodeDef& node : item.graph.node()) {
    if (ModifiesInputsInPlace(node) || HasRefInput(node)) {
      graph_contains_assign_or_inplace_op_ = true;
      break;
    }
  }

  has_fetch_ = !item.fetch.empty();
  GrapplerItem item_to_optimize = item;
  GraphProperties properties(item_to_optimize);
  // Only assume feed shapes are valid in aggressive mode; a placeholder may
  // be fed a tensor of any shape otherwise.
  const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
  if (!properties
           .InferStatically(assume_valid_feeds,
                            /*aggressive_shape_inference=*/false,
                            /*include_input_tensor_values=*/false,
                            /*include_output_tensor_values=*/true)
           .ok()) {
    properties.Clear();
  }

  *optimized_graph = GraphDef();
  item_to_optimize.graph.Swap(optimized_graph);
  int64_t node_count;

  // Iterate to a fixed point, swapping the working graph in and out of the
  // item on each round.
  do {
    GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
    graph_modified_ = false;
    item_to_optimize.graph.Swap(optimized_graph);
    node_count = item_to_optimize.graph.node_size();
    TF_RETURN_IF_ERROR(RunOptimizationPass(cluster, &item_to_optimize,
                                           &properties, optimized_graph));
  } while (graph_modified_ || optimized_graph->node_size() != node_count);
  *optimized_graph->mutable_library() = item.graph.library();
  *optimized_graph->mutable_versions() = item.graph.versions();

  return absl::OkStatus();
}

}  // namespace grappler
}  // namespace tensorflow
#include "tensorflow/core/grappler/optimizers/constant_folding.h"

#include <string>

#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/tensor_coding.h"

namespace tensorflow {
namespace grappler {
namespace {

class ConstantFoldingTest : public GrapplerTest {
 protected:
  // Builds x*0, x*1, x+0, x+1 (boolean and/or equivalents for DT_BOOL) and
  // verifies that aggressive constant folding rewrites each to the expected
  // Const / Identity / Snapshot node while preserving the computed values.
  // When `use_snapshot` is true an Assign op is added so the graph contains
  // a ref input, which forces Snapshot instead of Identity.
  template <DataType DTYPE>
  void SimpleNeutralElementTest() {
    for (bool use_snapshot : {false, true}) {
      typedef typename EnumToDataType<DTYPE>::Type T;
      tensorflow::Scope s = tensorflow::Scope::NewRootScope();
      Output x = ops::Placeholder(s.WithOpName("x"), DTYPE,
                                  ops::Placeholder::Shape(TensorShape({2, 2})));
      Output v = ops::Variable(s.WithOpName("v"), {2, 2}, DTYPE);
      Tensor zeros_t(DTYPE, TensorShape({2, 2}));
      Tensor ones_t(DTYPE, TensorShape({2, 2}));
      Tensor x_t(DTYPE, TensorShape({2, 2}));
      for (int i = 0; i < 4; ++i) {
        zeros_t.flat<T>()(i) = T(0);
        ones_t.flat<T>()(i) = T(1);
        x_t.flat<T>()(i) = T(i + 1);
      }
      Output zeros = ops::Const(s.WithOpName("zeros"), zeros_t);
      Output ones = ops::Const(s.WithOpName("ones"), ones_t);
      Output mul1;
      Output mul2;
      Output add1;
      Output add2;
      if (DTYPE == DT_BOOL) {
        mul1 = ops::LogicalAnd(s.WithOpName("mul1"), x, zeros);
        mul2 = ops::LogicalAnd(s.WithOpName("mul2"), x, ones);
        add1 = ops::LogicalOr(s.WithOpName("add1"), x, zeros);
        add2 = ops::LogicalOr(s.WithOpName("add2"), x, ones);
      } else {
        if (DTYPE == DT_FLOAT) {
          // x * 0 only folds for float via MulNoNan (0 * NaN stays 0).
          mul1 = ops::MulNoNan(s.WithOpName("mul1"), x, zeros);
        } else {
          mul1 = ops::Mul(s.WithOpName("mul1"), x, zeros);
        }
        mul2 = ops::Mul(s.WithOpName("mul2"), x, ones);
        add1 = ops::Add(s.WithOpName("add1"), x, zeros);
        // BUG FIX: this previously re-assigned `add1`
        // (`add1 = ops::Add(s.WithOpName("add2"), x, ones);`), leaving
        // `add2` default-constructed even though the node named "add2" is
        // fetched and checked below.
        add2 = ops::Add(s.WithOpName("add2"), x, ones);
      }
      if (use_snapshot) {
        // Add an op with a reference input so folding must emit Snapshot.
        ops::Assign(s.WithOpName("assign"), v, ones);
      }
      GrapplerItem item;
      TF_CHECK_OK(s.ToGraphDef(&item.graph));
      item.fetch = {"mul1", "mul2", "add1", "add2"};
      ConstantFolding optimizer(RewriterConfig::AGGRESSIVE,
                                nullptr);
      GraphDef output;
      Status status = optimizer.Optimize(nullptr, item, &output);
      TF_EXPECT_OK(status);
      EXPECT_EQ(7, output.node_size());
      const string snapshot_or_identity =
          use_snapshot ? "Snapshot" : "Identity";
      for (int i = 0; i < output.node_size(); ++i) {
        const NodeDef& node = output.node(i);
        const string& name = node.name();
        if (name == "mul1") {
          // x * 0 folds to Const; originals remain as control deps.
          EXPECT_EQ("Const", node.op());
          EXPECT_EQ("^x", node.input(0));
          EXPECT_EQ("^zeros", node.input(1));
        } else if (name == "mul2") {
          // x * 1 becomes a pass-through of x.
          EXPECT_EQ(snapshot_or_identity, node.op());
          EXPECT_EQ("x", node.input(0));
          EXPECT_EQ("^ones", node.input(1));
        } else if (name == "add1") {
          // x + 0 becomes a pass-through of x.
          EXPECT_EQ(snapshot_or_identity, node.op());
          EXPECT_EQ("x", node.input(0));
          EXPECT_EQ("^zeros", node.input(1));
        } else if (name == "add2") {
          if (DTYPE == DT_BOOL) {
            // x || true folds to a constant.
            EXPECT_EQ("Const", node.op());
            EXPECT_EQ("^x", node.input(0));
            EXPECT_EQ("^ones", node.input(1));
          } else {
            // 1 is not a neutral element for Add; the op is kept.
            EXPECT_EQ("Add", node.op());
            EXPECT_EQ("x", node.input(0));
            EXPECT_EQ("ones", node.input(1));
          }
        }
      }
      // The folded graph must produce the same values as the original.
      auto tensors_expected =
          EvaluateNodes(item.graph, item.fetch, {{"x", x_t}});
      auto tensors = EvaluateNodes(output, item.fetch, {{"x", x_t}});
      EXPECT_EQ(4, tensors_expected.size());
      EXPECT_EQ(4, tensors.size());
      for (int i = 0; i < item.fetch.size(); ++i) {
        test::ExpectTensorEqual<T>(tensors_expected[i], tensors[i]);
      }
    }
  }

  // Parameter list continues on the next source line.
  void MulConvPushDownTest(const TensorShape&
// Continuation of MulConvPushDownTest's parameter list; the
// `void MulConvPushDownTest(const TensorShape&` opener is on the previous
// source line.
      input_shape, const TensorShape& filter_shape,
      const TensorShape& mul_const_input_shape, const bool use_3d_conv,
      const char* padding, const char* data_format,
      const bool expect_folded) {
    // Builds mul(C, conv(x, filter)) and checks whether constant folding
    // pushed the multiplication by C into the constant filter
    // ("conv/merged_input") when `expect_folded` is true, or left the
    // graph untouched when false.
    tensorflow::Scope s = tensorflow::Scope::NewRootScope();
    Tensor filter_values(DT_FLOAT, filter_shape);
    for (int i = 0; i < filter_values.NumElements(); ++i) {
      filter_values.flat<float>()(i) = std::sqrt(static_cast<float>(i));
    }
    Output filter =
        ops::Const(s.WithOpName("filter"), Input::Initializer(filter_values));
    Output input = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
                                    ops::Placeholder::Shape(input_shape));
    Output conv;
    if (use_3d_conv) {
      conv = ops::Conv3D(s.WithOpName("conv"), input, filter, {1, 1, 1, 1, 1},
                         padding, ops::Conv3D::DataFormat(data_format));
    } else {
      conv = ops::Conv2D(s.WithOpName("conv"), input, filter, {1, 1, 1, 1},
                         padding, ops::Conv2D::DataFormat(data_format));
    }
    Tensor mul_const_input(DT_FLOAT, mul_const_input_shape);
    for (int i = 0; i < mul_const_input.NumElements(); ++i) {
      mul_const_input.flat<float>()(i) = static_cast<float>(i + 3);
    }
    Output c =
        ops::Const(s.WithOpName("c"), Input::Initializer(mul_const_input));
    Output mul = ops::Mul(s.WithOpName("mul"), c, conv);
    GrapplerItem item;
    TF_CHECK_OK(s.ToGraphDef(&item.graph));
    ConstantFolding optimizer(nullptr);
    GraphDef output;
    Status status = optimizer.Optimize(nullptr, item, &output);
    TF_EXPECT_OK(status);
    EXPECT_EQ(5, output.node_size());
    int found = 0;
    if (expect_folded) {
      // "mul" was rewritten into the conv op itself, fed by a merged
      // constant filter.
      for (const auto& node : output.node()) {
        if (node.name() == "mul") {
          found++;
          EXPECT_EQ(use_3d_conv ? "Conv3D" : "Conv2D", node.op());
          EXPECT_EQ(2, node.input_size());
          EXPECT_EQ("x", node.input(0));
          EXPECT_EQ("conv/merged_input", node.input(1));
        } else if (node.name() == "conv/merged_input") {
          found++;
          EXPECT_EQ("Const", node.op());
          EXPECT_EQ(0, node.input_size());
        }
      }
    } else {
      // Push-down not applicable: original mul/conv structure survives.
      for (const auto& node : output.node()) {
        if (node.name() == "mul") {
          found++;
          EXPECT_EQ("Mul", node.op());
          EXPECT_EQ(2, node.input_size());
          EXPECT_EQ("c", node.input(0));
          EXPECT_EQ("conv", node.input(1));
        } else if (node.name() == "conv") {
          found++;
          EXPECT_EQ(use_3d_conv ? "Conv3D" : "Conv2D", node.op());
          EXPECT_EQ(2, node.input_size());
          EXPECT_EQ("x", node.input(0));
          EXPECT_EQ("filter", node.input(1));
        }
      }
    }
    EXPECT_EQ(2, found);
    // Regardless of structure, the optimized graph must compute the same
    // values as the original.
    std::vector<string> fetch = {"mul"};
    Tensor value(DT_FLOAT, input_shape);
    for (int i = 0; i < value.NumElements(); ++i) {
      value.flat<float>()(i) = i;
    }
    auto actual = EvaluateNodes(output, fetch, {{"x", value}});
    auto expected = EvaluateNodes(item.graph, fetch, {{"x", value}});
    test::ExpectTensorEqual<float>(expected[0], actual[0]);
  }

  // Pads in1 with zero-size paddings (foldable to Identity) and in2 with
  // real paddings (not foldable); T selects the paddings' index type.
  template <typename T>
  void PaddingWithZeroSize() {
    tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
    auto in1 = ops::Variable(scope.WithOpName("in1"), {4, 6}, DT_INT32);
    auto in2 = ops::Variable(scope.WithOpName("in2"), {2, 2}, DT_INT32);
    auto paddings1 =
        ops::Const<T>(scope.WithOpName("paddings1"), {0, 0, 0, 0}, {2, 2});
    auto paddings2 =
        ops::Const<T>(scope.WithOpName("paddings2"), {1, 1, 2, 2}, {2, 2});
    auto c1 = ops::Const(scope.WithOpName("c1"), 1);
    auto c2 = ops::Const(scope.WithOpName("c2"), 1);
    ops::PadV2 p1(scope.WithOpName("p1"), in1, paddings1, c1);
    ops::PadV2 p2(scope.WithOpName("p2"), in2, paddings2, c2);
    ops::Add out(scope.WithOpName("out"), p1, p2);
    GrapplerItem item;
    item.fetch = {"out"};
    TF_CHECK_OK(scope.ToGraphDef(&item.graph));
    ConstantFolding optimizer(nullptr);
    GraphDef got;
    Status status = optimizer.Optimize(nullptr, item, &got);
    TF_EXPECT_OK(status);
    // Expected graph continues on the next source line.
    GraphDef want;
    AddNode("in1", "VariableV2", {}, {},
&want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("paddings1", "Const", {}, {}, &want); AddNode("paddings2", "Const", {}, {}, &want); AddNode("c1", "Const", {}, {}, &want); AddNode("c2", "Const", {}, {}, &want); AddNode( "p1", "Identity", {"in1", AsControlDependency("paddings1"), AsControlDependency("c1")}, {}, &want); AddNode("p2", "PadV2", {"in2", "paddings2", "c2"}, {}, &want); AddNode("out", "Add", {"p1", "p2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_INT32>(TensorShape({4, 6})); auto in2_t = GenerateRandomTensor<DT_INT32>(TensorShape({2, 2})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]); } }; TEST_F(ConstantFoldingTest, SimpleFolding) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 1.0f, {1}); Output b = ops::Const(s.WithOpName("b"), 2.0f, {1}); Output c = ops::AddN(s.WithOpName("c").WithDevice("/CPU:0"), {a, b}); Output d = ops::AddN(s.WithOpName("d"), {b, c}); GrapplerItem item; item.fetch.push_back("d"); TF_CHECK_OK(s.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); EXPECT_EQ(1, output.node_size()); const NodeDef& node_d = output.node(0); EXPECT_EQ("d", node_d.name()); EXPECT_EQ("Const", node_d.op()); std::vector<string> fetch = {"d"}; auto tensors_expected = EvaluateNodes(item.graph, fetch); auto tensors = EvaluateNodes(output, fetch); EXPECT_EQ(1, tensors_expected.size()); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]); } TEST_F(ConstantFoldingTest, AddTree) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output c1 = 
// Continuation of TEST_F(ConstantFoldingTest, AddTree); the `Output c1 =`
// opener is on the previous source line.
      ops::Const(s.WithOpName("c1"), 1.0f, {1});
  Output c2 = ops::Const(s.WithOpName("c2"), 2.0f, {2});
  Output c3 = ops::Const(s.WithOpName("c3"), 3.0f, {2});
  Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
                              ops::Placeholder::Shape(TensorShape({2, 2})));
  // add_parent = c1 + (c2 + x): c1 and c2 should be merged.
  Output add_child = ops::Add(s.WithOpName("add_child"), c2, x);
  Output add_parent = ops::Add(s.WithOpName("add_parent"), c1, add_child);
  Output c4 = ops::Const(s.WithOpName("c4"), 4.0f, {2});
  Output c5 = ops::Const(s.WithOpName("c5"), 5.0f, {2});
  // NOTE(review): c20 is built but never consumed — presumably leftover
  // from an earlier variant of this test; confirm before removing.
  Output c20 = ops::Const(s.WithOpName("c20"), 20.0f, {2});
  Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
                              ops::Placeholder::Shape(TensorShape({2, 2})));
  // mul_parent = c5 * (c4 * y): c4 and c5 should be merged.
  Output mul_child = ops::Mul(s.WithOpName("mul_child"), c4, y);
  Output mul_parent = ops::Mul(s.WithOpName("mul_parent"), c5, mul_child);
  // Mixed ops (Mul over Add) must NOT be rewritten.
  Output addmul_child = ops::Add(s.WithOpName("addmul_child"), c4, x);
  Output addmul_parent =
      ops::Mul(s.WithOpName("addmul_parent"), c5, addmul_child);
  GrapplerItem item;
  item.fetch = {"add_parent", "mul_parent", "addmul_parent"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(10, output.node_size());
  for (const auto& node : output.node()) {
    if (node.name() == "add_child") {
      // Folded to the Const holding c1 + c2, broadcast to shape {2}.
      EXPECT_EQ("Const", node.op());
      TensorProto t = node.attr().at("value").tensor();
      ASSERT_EQ(1, t.tensor_shape().dim_size());
      EXPECT_EQ(2, t.tensor_shape().dim(0).size());
    } else if (node.name() == "add_parent") {
      EXPECT_EQ("Add", node.op());
      ASSERT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("add_child", node.input(1));
    } else if (node.name() == "mul_child") {
      // Folded to the Const holding c4 * c5.
      EXPECT_EQ("Const", node.op());
      TensorProto t = node.attr().at("value").tensor();
      EXPECT_EQ(1, t.tensor_shape().dim_size());
      EXPECT_EQ(2, t.tensor_shape().dim(0).size());
    } else if (node.name() == "mul_parent") {
      EXPECT_EQ("Mul", node.op());
      ASSERT_EQ(2, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("mul_child", node.input(1));
    } else if (node.name() == "addmul_child") {
      // Unchanged: push-down does not cross Add/Mul boundaries.
      EXPECT_EQ("Add", node.op());
      ASSERT_EQ(2, node.input_size());
      EXPECT_EQ("c4", node.input(0));
      EXPECT_EQ("x", node.input(1));
    }
  }
  // Values must be preserved by the rewrite.
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<string> fetch = {"add_parent", "mul_parent"};
  auto tensor_expected =
      EvaluateNodes(item.graph, fetch, {{"x", x_t}, {"y", y_t}});
  ASSERT_EQ(fetch.size(), tensor_expected.size());
  fetch = {"add_parent", "mul_parent"};
  auto tensors = EvaluateNodes(output, fetch, {{"x", x_t}, {"y", y_t}});
  ASSERT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < fetch.size(); i++) {
    test::ExpectTensorEqual<float>(tensor_expected[i], tensors[i]);
  }
}

// (x - x) + c1: the subtraction is rewritten; Add/Sub mixing is handled.
// (Test name typo "Subtact" is upstream's — renaming would break test
// filters, so it is kept.)
TEST_F(ConstantFoldingTest, AddSubtactTree) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output c1 = ops::Const(s.WithOpName("c1"), 1.0f, {1});
  Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
                              ops::Placeholder::Shape(TensorShape({2, 2})));
  Output sub_child = ops::Sub(s.WithOpName("sub_child"), x, x);
  Output add_parent = ops::Add(s.WithOpName("add_parent"), sub_child, c1);
  GrapplerItem item;
  item.fetch = {"add_parent"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(4, output.node_size());
  for (const auto& node : output.node()) {
    if (node.name() == "sub_child") {
      EXPECT_EQ("Sub", node.op());
      ASSERT_EQ(2, node.input_size());
      EXPECT_EQ("c1", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "add_parent") {
      EXPECT_EQ("Add", node.op());
      ASSERT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("sub_child", node.input(1));
    }
  }
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<string> fetch = {"add_parent"};
  // Call continues on the next source line.
  auto tensor_expected = EvaluateNodes(item.graph,
// Completes the EvaluateNodes(item.graph, ...) call opened on the previous
// source line (tail of TEST_F AddSubtactTree).
                                       fetch, {{"x", x_t}});
  ASSERT_EQ(fetch.size(), tensor_expected.size());
  fetch = {"add_parent"};
  auto tensors = EvaluateNodes(output, fetch, {{"x", x_t}});
  ASSERT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < fetch.size(); i++) {
    test::ExpectTensorEqual<float>(tensor_expected[i], tensors[i]);
  }
}

// Exhaustively sweeps every combination of op kind (Add/Sub vs Mul/Div),
// commutativity and constant position in a two-level tree, checking only
// that the optimized graph computes identical values.
TEST_F(ConstantFoldingTest, ConstantPushDown) {
  for (int is_add : {true, false}) {
    for (int is_parent_commutative : {true, false}) {
      for (int is_child_commutative : {true, false}) {
        for (int is_left_child_const : {true, false}) {
          for (int is_left_leaf_const : {true, false}) {
            tensorflow::Scope s = tensorflow::Scope::NewRootScope();
            Output c2 = ops::Const(s.WithOpName("c2"), 2.0f, {2});
            Output c3 = ops::Const(s.WithOpName("c3"), 3.0f, {2});
            Output x =
                ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
                                 ops::Placeholder::Shape(TensorShape({2, 2})));
            // Builds the requested binary op with the constant on the
            // chosen side; commutative selects Add/Mul, otherwise Sub/Div.
            auto get_op = [&](bool is_commutative, bool is_left_arg_const,
                              const string& name, const Output& const_arg,
                              const Output non_const_arg) -> Output {
              if (is_add) {
                if (is_commutative) {
                  return ops::Add(
                      s.WithOpName(name),
                      is_left_arg_const ? const_arg : non_const_arg,
                      is_left_arg_const ? non_const_arg : const_arg);
                } else {
                  return ops::Sub(
                      s.WithOpName(name),
                      is_left_arg_const ? const_arg : non_const_arg,
                      is_left_arg_const ? non_const_arg : const_arg);
                }
              } else {
                if (is_commutative) {
                  return ops::Mul(
                      s.WithOpName(name),
                      is_left_arg_const ? const_arg : non_const_arg,
                      is_left_arg_const ? non_const_arg : const_arg);
                } else {
                  return ops::Div(
                      s.WithOpName(name),
                      is_left_arg_const ? const_arg : non_const_arg,
                      is_left_arg_const ? non_const_arg : const_arg);
                }
              }
            };
            Output child = get_op(is_child_commutative, is_left_leaf_const,
                                  "child", c2, x);
            Output parent = get_op(is_parent_commutative, is_left_child_const,
                                   "parent", c3, child);
            GrapplerItem item;
            item.fetch = {"parent"};
            TF_CHECK_OK(s.ToGraphDef(&item.graph));
            ConstantFolding optimizer(nullptr);
            GraphDef output;
            Status status = optimizer.Optimize(nullptr, item, &output);
            TF_EXPECT_OK(status);
            // Only value preservation is asserted — the rewritten structure
            // differs per combination.
            auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
            std::vector<string> fetch = {"parent"};
            auto tensor_expected =
                EvaluateNodes(item.graph, fetch, {{"x", x_t}});
            ASSERT_EQ(fetch.size(), tensor_expected.size());
            fetch = {"parent"};
            auto tensors = EvaluateNodes(output, fetch, {{"x", x_t}});
            ASSERT_EQ(fetch.size(), tensors.size());
            for (int i = 0; i < fetch.size(); i++) {
              test::ExpectTensorEqual<float>(tensor_expected[i], tensors[i]);
            }
          }
        }
      }
    }
  }
}

// Constant push-down through BiasAdd: cases 1-4 are rewritable, 5-7 are
// not. Body continues on the following source lines.
TEST_F(ConstantFoldingTest, ConstantPushDownBiasAdd) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output c_mat = ops::Const(s.WithOpName("c_mat"), 2.0f, {2, 2});
  Output c_vec = ops::Const(s.WithOpName("c_vec"), 3.0f, {2});
  Output x_mat = ops::Placeholder(s.WithOpName("x_mat"), DT_FLOAT,
                                  ops::Placeholder::Shape(TensorShape({2, 2})));
  Output x_vec = ops::Placeholder(s.WithOpName("x_vec"), DT_FLOAT,
                                  ops::Placeholder::Shape(TensorShape({2})));
  // Case 1: Add(BiasAdd(c_mat, x_vec), c_vec) — rewritable.
  Output child1 = ops::BiasAdd(s.WithOpName("child1"), c_mat, x_vec);
  Output parent1 = ops::Add(s.WithOpName("parent1"), child1, c_vec);
  Output child1a = ops::BiasAdd(s.WithOpName("child1a"), c_mat, x_vec);
  Output parent1a = ops::Add(s.WithOpName("parent1a"), c_vec, child1a);
  // Case 2: Add(BiasAdd(x_mat, c_vec), c_mat) — rewritable.
  Output child2 = ops::BiasAdd(s.WithOpName("child2"), x_mat, c_vec);
  Output parent2 = ops::Add(s.WithOpName("parent2"), child2, c_mat);
  Output child2a = ops::BiasAdd(s.WithOpName("child2a"), x_mat, c_vec);
  Output parent2a = ops::Add(s.WithOpName("parent2a"), c_mat, child2a);
  // Case 3: BiasAdd(Add(c_mat, x_vec), c_vec) — rewritable.
  Output child3 = ops::Add(s.WithOpName("child3"), c_mat, x_vec);
  Output parent3 =
// Continuation of TEST_F(ConstantFoldingTest, ConstantPushDownBiasAdd);
// the `Output parent3 =` opener is on the previous source line.
      ops::BiasAdd(s.WithOpName("parent3"), child3, c_vec);
  Output child3a = ops::Add(s.WithOpName("child3a"), x_vec, c_mat);
  Output parent3a = ops::BiasAdd(s.WithOpName("parent3a"), child3a, c_vec);
  // Case 4: BiasAdd(BiasAdd(c_mat, x_vec), c_vec) — rewritable.
  Output child4 = ops::BiasAdd(s.WithOpName("child4"), c_mat, x_vec);
  Output parent4 = ops::BiasAdd(s.WithOpName("parent4"), child4, c_vec);
  // Cases 5-7: non-rewritable shapes/structures; must stay intact.
  Output child5 = ops::Add(s.WithOpName("child5"), x_vec, x_vec);
  Output parent5 = ops::BiasAdd(s.WithOpName("parent5"), c_mat, child5);
  Output child6 = ops::Add(s.WithOpName("child6"), x_vec, c_vec);
  Output parent6 = ops::BiasAdd(s.WithOpName("parent6"), c_mat, child6);
  Output child7 = ops::Add(s.WithOpName("child7"), x_mat, c_vec);
  Output parent7 = ops::BiasAdd(s.WithOpName("parent7"), child7, c_vec);
  GrapplerItem item;
  item.fetch = {"parent1",  "parent2", "parent3", "parent1a", "parent2a",
                "parent3a", "parent4", "parent5", "parent6",  "parent7"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(24, output.node_size());
  for (const auto& node : output.node()) {
    if (node.name() == "child1" || node.name() == "child1a" ||
        node.name() == "child2" || node.name() == "child2a" ||
        node.name() == "child3" || node.name() == "child3a" ||
        node.name() == "child4") {
      // Rewritable children collapse into Const nodes.
      EXPECT_EQ(node.op(), "Const") << " node: " << node.name();
    } else if (node.name() != "c_mat" && node.name() != "c_vec") {
      EXPECT_NE(node.op(), "Const") << " node: " << node.name();
    }
  }
  // Values must be preserved for every fetched output.
  auto x_mat_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto x_vec_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2}));
  std::vector<string> fetch = item.fetch;
  auto tensor_expected = EvaluateNodes(
      item.graph, fetch, {{"x_vec", x_vec_t}, {"x_mat", x_mat_t}});
  ASSERT_EQ(fetch.size(), tensor_expected.size());
  auto tensors =
      EvaluateNodes(output, fetch, {{"x_vec", x_vec_t}, {"x_mat", x_mat_t}});
  ASSERT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < fetch.size(); i++) {
    test::ExpectTensorEqual<float>(tensor_expected[i], tensors[i]);
  }
}

// Mul-by-scalar-constant should be pushed into the Conv2D filter.
#ifndef TENSORFLOW_USE_ROCM
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_ScalarConst) {
  for (string data_format : {
         "NHWC",
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
         "NCHW"
#endif
       }) {
    MulConvPushDownTest(
        data_format == "NHWC" ? TensorShape{4, 10, 10, 3}
                              : TensorShape{4, 3, 10, 10},
        {2, 2, 3, 5},
        {},
        false, "VALID", data_format.c_str(),
        true);
  }
}
#endif

// Singleton (1-element) constants are also pushable.
#ifndef TENSORFLOW_USE_ROCM
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_SingletonConst) {
  for (string data_format : {
         "NHWC",
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
         "NCHW"
#endif
       }) {
    for (auto mul_const_input_shape :
         {TensorShape{1}, TensorShape{1, 1, 1, 1}}) {
      MulConvPushDownTest(
          data_format == "NHWC" ? TensorShape{4, 10, 10, 3}
                                : TensorShape{4, 3, 10, 10},
          {2, 2, 3, 5}, mul_const_input_shape,
          false, "VALID", data_format.c_str(),
          true);
    }
  }
}
#endif

// A 5-D singleton constant does not broadcast against a 4-D conv output,
// so no push-down happens. Arguments continue on the next source line.
TEST_F(ConstantFoldingTest,
       MulConvPushDownTest_Conv2D_SingletonConst_ShapeMismatch) {
  for (string data_format : {
         "NHWC",
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
         "NCHW"
#endif
       }) {
    MulConvPushDownTest(
        data_format == "NHWC" ?
// Completes the ternary opened on the previous source line (tail of
// TEST_F MulConvPushDownTest_Conv2D_SingletonConst_ShapeMismatch).
        TensorShape{4, 10, 10, 3}
                              : TensorShape{4, 3, 10, 10},
        {2, 2, 3, 5},
        {1, 1, 1, 1, 1},
        false, "VALID", data_format.c_str(),
        false);
  }
}

// A {3,1,3} constant cannot be folded into the filter.
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_3x1x3Const) {
  for (auto data_format : {
         "NHWC",
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
         "NCHW"
#endif
       }) {
    MulConvPushDownTest(
        {3, 3, 3, 3},
        {3, 3, 3, 3},
        {3, 1, 3},
        false, "SAME", data_format,
        false);
  }
}

// Vector-like constants along the NHWC channel dimension are pushable.
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_NHWC_VectorLikeConst) {
  for (auto mul_const_input_shape :
       {TensorShape{3}, TensorShape{1, 3}, TensorShape{1, 1, 1, 3}}) {
    MulConvPushDownTest(
        {3, 3, 3, 3},
        {3, 3, 3, 3}, mul_const_input_shape,
        false, "SAME", "NHWC",
        true);
  }
}

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
// In NCHW the same vector-like constants are not pushed down.
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_NCHW_VectorLikeConst) {
  for (auto mul_const_input_shape :
       {TensorShape{3}, TensorShape{3, 1, 1}, TensorShape{1, 3, 1, 1}}) {
    MulConvPushDownTest(
        {3, 3, 3, 3},
        {3, 3, 3, 3}, mul_const_input_shape,
        false, "SAME", "NCHW",
        false);
  }
}
#endif

TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv2D_3x1Const) {
  for (auto data_format : {
         "NHWC",
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
         "NCHW"
#endif
       }) {
    MulConvPushDownTest(
        {3, 3, 3, 3},
        {3, 3, 3, 3},
        {3, 1},
        false, "SAME", data_format,
        false);
  }
}

#ifndef TENSORFLOW_USE_ROCM
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv3D_NDHWC_1x1x3Const) {
  MulConvPushDownTest(
      {3, 3, 3, 3, 3},
      {3, 3, 3, 3, 3},
      {1, 1, 3},
      true, "SAME", "NDHWC",
      true);
}
#endif

// NOTE(review): the test name says NCDHW but "NDHWC" is passed — this
// matches the upstream test as written; confirm intent before "fixing".
TEST_F(ConstantFoldingTest, MulConvPushDownTest_Conv3D_NCDHW_3x1x1x1Const) {
  MulConvPushDownTest(
      {3, 3, 3, 3, 3},
      {3, 3, 3, 3, 3},
      {3, 1, 1, 1},
      true, "SAME", "NDHWC",
      false);
}

// Neutral-element rewrites (x*0, x*1, x+0, ...) where the zero/one operand
// comes from a Const, a ZerosLike/OnesLike, or a Fill node.
// Body continues on the following source lines.
TEST_F(ConstantFoldingTest, NeutralElement) {
  int kConst = 0;
  int kLike = 1;
  int kFill = 2;
  for (int const_type : {kConst, kLike, kFill}) {
    tensorflow::Scope s = tensorflow::Scope::NewRootScope();
    Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
                                ops::Placeholder::Shape(TensorShape({2, 2})));
    Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
                                ops::Placeholder::Shape(TensorShape({2, 2})));
    Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
                                ops::Placeholder::Shape(TensorShape({3, 2})));
    Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
                                ops::Placeholder::Shape(TensorShape({2, 3})));
    Output bias = ops::Placeholder(s.WithOpName("bias"), DT_FLOAT,
                                   ops::Placeholder::Shape(TensorShape({2})));
    Output zeros_1d = ops::Const(s.WithOpName("zeros_1d"), 0.0f, {2});
    Output zeros_const = ops::Const(s.WithOpName("zeros_const"), 0.0f, {2, 2});
    Output zeros_const_bcast =
        ops::Const(s.WithOpName("zeros_const_bcast"), 0.0f, {2, 2, 2});
    Output zeros_like = ops::ZerosLike(s.WithOpName("zeros_like"), x);
    Output zeros_fill = ops::Fill(s.WithOpName("zeros_fill"), {2, 2}, 0.0f);
    Output zeros = const_type == kConst
                       ? zeros_const
                       : (const_type == kLike ? zeros_like : zeros_fill);
    Output ones_const = ops::Const(s.WithOpName("ones_const"), 1.0f, {2, 2});
    Output ones_const_bcast =
        ops::Const(s.WithOpName("ones_const_bcast"), 1.0f, {2, 2, 2});
    Output ones_like = ops::OnesLike(s.WithOpName("ones_like"), x);
    Output ones_fill = ops::Fill(s.WithOpName("ones_fill"), {2, 2}, 1.0f);
    // Ternary continues on the next source line.
    Output ones = const_type == kConst ? ones_const : (const_type == kLike ?
// Completes the `Output ones = ...` ternary opened on the previous source
// line (middle of TEST_F NeutralElement; the test continues past the end
// of this chunk).
        ones_like : ones_fill);
    // Every neutral-element pattern under test.
    Output mul1 = ops::Mul(s.WithOpName("mul1"), x, zeros);
    Output mul2 = ops::Mul(s.WithOpName("mul2"), zeros, y);
    Output mul1_bcast =
        ops::Mul(s.WithOpName("mul1_bcast"), x, ones_const_bcast);
    Output mul2_bcast =
        ops::Mul(s.WithOpName("mul2_bcast"), ones_const_bcast, y);
    Output mul3 = ops::Mul(s.WithOpName("mul3"), x, ones);
    Output mul4 = ops::Mul(s.WithOpName("mul4"), ones, y);
    Output mul5 = ops::MulNoNan(s.WithOpName("mul5"), x, zeros_1d);
    Output mul6 = ops::MulNoNan(s.WithOpName("mul6"), zeros_1d, y);
    Output div1 = ops::Div(s.WithOpName("div1"), x, ones);
    Output div2 = ops::Div(s.WithOpName("div2"), ones, y);
    Output floordiv = ops::FloorDiv(s.WithOpName("floordiv"), x, ones);
    Output matmul1 = ops::MatMul(s.WithOpName("matmul1"), x, zeros);
    Output matmul2 = ops::MatMul(s.WithOpName("matmul2"), zeros, y);
    Output matmul3 = ops::MatMul(s.WithOpName("matmul3"), a, zeros);
    Output matmul4 = ops::MatMul(s.WithOpName("matmul4"), zeros, b);
    Output add1 = ops::Add(s.WithOpName("add1"), x, zeros);
    Output add2 = ops::Add(s.WithOpName("add2"), zeros, y);
    Output add1_bcast =
        ops::Add(s.WithOpName("add1_bcast"), x, zeros_const_bcast);
    Output add2_bcast =
        ops::Add(s.WithOpName("add2_bcast"), zeros_const_bcast, y);
    Output bias_add1 = ops::BiasAdd(s.WithOpName("bias_add1"), x, zeros_1d);
    Output bias_add2 = ops::BiasAdd(s.WithOpName("bias_add2"), zeros, bias);
    Output sub1 = ops::Sub(s.WithOpName("sub1"), x, zeros);
    Output sub2 = ops::Sub(s.WithOpName("sub2"), zeros, y);
    // Stack everything so a single fetch forces all ops to be kept live.
    Output concat =
        ops::Stack(s.WithOpName("stack"),
                   {mul1, mul2, mul3, mul4, mul5, mul6, div1, div2, floordiv,
                    matmul1, matmul2, add1, add2, bias_add1, bias_add2, sub1,
                    sub2});
    GrapplerItem item;
    TF_CHECK_OK(s.ToGraphDef(&item.graph));
    item.fetch = {"stack",      "matmul3",    "matmul4",   "mul1_bcast",
                  "mul2_bcast", "add1_bcast", "add2_bcast"};
    ConstantFolding optimizer(RewriterConfig::AGGRESSIVE,
                              nullptr);
    GraphDef output;
    Status status = optimizer.Optimize(nullptr, item, &output);
    TF_EXPECT_OK(status);
    // Node names vary with the flavor of zero/one generator used.
    const string suffix =
        (const_type == kConst ? "_const"
                              : (const_type == kLike ? "_like" : "_fill"));
    const string zeros_name = strings::StrCat("zeros", suffix);
    const string ones_name = strings::StrCat("ones", suffix);
    const string ctrl_zeros_name = strings::StrCat("^zeros", suffix);
    const string ctrl_ones_name = strings::StrCat("^ones", suffix);
    EXPECT_EQ(const_type == kFill ? 43 : 39, output.node_size());
    for (int i = 0; i < output.node_size(); ++i) {
      const NodeDef& node = output.node(i);
      const string& name = node.name();
      if (name == "mul1") {
        EXPECT_EQ("Const", node.op());
        EXPECT_EQ("^x", node.input(0));
        EXPECT_EQ(ctrl_zeros_name, node.input(1));
      } else if (name == "mul2") {
        EXPECT_EQ("Const", node.op());
        EXPECT_EQ(ctrl_zeros_name, node.input(0));
        EXPECT_EQ("^y", node.input(1));
      } else if (name == "mul1_bcast") {
        // Multiplying by broadcast-ones becomes an explicit BroadcastTo.
        EXPECT_EQ("BroadcastTo", node.op());
        EXPECT_EQ("x", node.input(0));
        EXPECT_EQ("^ones_const_bcast", node.input(2));
      } else if (name == "mul2_bcast") {
        EXPECT_EQ("BroadcastTo", node.op());
        EXPECT_EQ("y", node.input(0));
        EXPECT_EQ("^ones_const_bcast", node.input(2));
      } else if (name == "mul3") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("x", node.input(0));
        EXPECT_EQ(ctrl_ones_name, node.input(1));
      } else if (name == "mul4") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("y", node.input(0));
        EXPECT_EQ(ctrl_ones_name, node.input(1));
      } else if (name == "mul5") {
        EXPECT_EQ("Const", node.op());
        EXPECT_EQ("^x", node.input(0));
        EXPECT_EQ("^zeros_1d", node.input(1));
      } else if (name == "mul6") {
        EXPECT_EQ("Const", node.op());
        EXPECT_EQ("^zeros_1d", node.input(0));
        EXPECT_EQ("^y", node.input(1));
      } else if (name == "div1") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("x", node.input(0));
        EXPECT_EQ(ctrl_ones_name, node.input(1));
      } else if (name == "div2") {
        // 1 / y becomes Reciprocal.
        EXPECT_EQ("Reciprocal", node.op());
        EXPECT_EQ("y", node.input(0));
        EXPECT_EQ(ctrl_ones_name, node.input(1));
      } else if (name == "floordiv") {
        EXPECT_EQ("FloorDiv", node.op());
        EXPECT_EQ("x", node.input(0));
EXPECT_EQ(ones_name, node.input(1)); } else if (name == "matmul1") { EXPECT_EQ("Const", node.op()); EXPECT_EQ("^x", node.input(0)); EXPECT_EQ(ctrl_zeros_name, node.input(1)); } else if (name == "matmul2") { EXPECT_EQ("Const", node.op()); EXPECT_EQ(ctrl_zeros_name, node.input(0)); EXPECT_EQ("^y", node.input(1)); } else if (name == "matmul3") { EXPECT_EQ("Const", node.op()); EXPECT_EQ("^a", node.input(0)); EXPECT_EQ(ctrl_zeros_name, node.input(1)); TensorProto t = node.attr().at("value").tensor(); EXPECT_EQ(1, t.float_val_size()); EXPECT_EQ(0, t.float_val(0)); EXPECT_EQ(2, t.tensor_shape().dim_size()); EXPECT_EQ(3, t.tensor_shape().dim(0).size()); EXPECT_EQ(2, t.tensor_shape().dim(1).size()); } else if (name == "matmul4") { EXPECT_EQ("Const", node.op()); EXPECT_EQ(ctrl_zeros_name, node.input(0)); EXPECT_EQ("^b", node.input(1)); TensorProto t = node.attr().at("value").tensor(); EXPECT_EQ(1, t.float_val_size()); EXPECT_EQ(0, t.float_val(0)); EXPECT_EQ(2, t.tensor_shape().dim_size()); EXPECT_EQ(2, t.tensor_shape().dim(0).size()); EXPECT_EQ(3, t.tensor_shape().dim(1).size()); } else if (name == "add1") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ(ctrl_zeros_name, node.input(1)); } else if (name == "add2") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ("y", node.input(0)); EXPECT_EQ(ctrl_zeros_name, node.input(1)); } else if (name == "add1_bcast") { EXPECT_EQ("BroadcastTo", node.op()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("^zeros_const_bcast", node.input(2)); } else if (name == "add2_bcast") { EXPECT_EQ("BroadcastTo", node.op()); EXPECT_EQ("y", node.input(0)); EXPECT_EQ("^zeros_const_bcast", node.input(2)); } else if (name == "bias_add1") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("^zeros_1d", node.input(1)); } else if (name == "bias_add2") { EXPECT_EQ("BroadcastTo", node.op()); EXPECT_EQ("bias", node.input(0)); EXPECT_EQ("ConstantFolding/bias_add2-broadcastto_shape-1", node.input(1)); 
// NOTE(review): the first lines below are the tail of the preceding
// NeutralElement test body; its opening lines precede this chunk.
EXPECT_EQ(ctrl_zeros_name, node.input(2));
} else if (name == "ConstantFolding/bias_add2-broadcastto_shape-1") {
  // Shape constant materialized for the rewritten bias_add2 BroadcastTo.
  EXPECT_EQ("Const", node.op());
  EXPECT_EQ(ctrl_zeros_name, node.input(0));
  EXPECT_EQ(node.attr().at("dtype").type(), DT_INT32);
  TensorProto t = node.attr().at("value").tensor();
  EXPECT_EQ(DT_INT32, t.dtype());
  EXPECT_EQ(1, t.tensor_shape().dim_size());
  EXPECT_EQ(2, t.tensor_shape().dim(0).size());
} else if (name == "sub1") {
  // x - 0 is rewritten to Identity(x) with a control dep on the zeros node.
  EXPECT_EQ("Identity", node.op());
  EXPECT_EQ("x", node.input(0));
  EXPECT_EQ(ctrl_zeros_name, node.input(1));
} else if (name == "sub2") {
  // 0 - y is rewritten to Neg(y) with a control dep on the zeros node.
  EXPECT_EQ("Neg", node.op());
  EXPECT_EQ("y", node.input(0));
  EXPECT_EQ(ctrl_zeros_name, node.input(1));
}
// Every multiplication with a zero operand must now be a 2x2 all-zeros
// constant.
const std::set<string> square_zero_const{"mul1", "mul2",    "mul5",
                                         "mul6", "matmul1", "matmul2"};
if (square_zero_const.count(name) > 0) {
  TensorProto t = node.attr().at("value").tensor();
  EXPECT_EQ(1, t.float_val_size());
  EXPECT_EQ(0, t.float_val(0));
  EXPECT_EQ(2, t.tensor_shape().dim_size());
  EXPECT_EQ(2, t.tensor_shape().dim(0).size());
  EXPECT_EQ(2, t.tensor_shape().dim(1).size());
}
}
// Evaluate original and optimized graphs on the same random inputs and
// check the fetched outputs agree numerically.
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 2}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3}));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto bias_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2}));
auto tensors_expected = EvaluateNodes(
    item.graph, item.fetch,
    {{"x", x_t}, {"y", y_t}, {"a", a_t}, {"b", b_t}, {"bias", bias_t}});
EXPECT_EQ(item.fetch.size(), tensors_expected.size());
auto tensors = EvaluateNodes(
    output, item.fetch,
    {{"x", x_t}, {"y", y_t}, {"a", a_t}, {"b", b_t}, {"bias", bias_t}});
EXPECT_EQ(item.fetch.size(), tensors.size());
for (int i = 0; i < item.fetch.size(); ++i) {
  test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
}

// Runs the neutral-element simplifications for bool and the 16-bit float
// types via the SimpleNeutralElementTest helper.
TEST_F(ConstantFoldingTest, NeutralElement_ShortFloats) {
  SimpleNeutralElementTest<DT_BOOL>();
  SimpleNeutralElementTest<DT_HALF>();
  SimpleNeutralElementTest<DT_BFLOAT16>();
}

// Strength reduction of division: float Div/RealDiv by a constant is
// rewritten as a Mul by a materialized reciprocal constant, while integer
// Div is left untouched.
TEST_F(ConstantFoldingTest, StrengthReduce_Reciprocal) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // cf_half (0.5) is only used at the end to compare against the
  // materialized reciprocal constants.
  Output cf_half = ops::Const(s.WithOpName("cf_half"), 0.5f, {1});
  Output xf = ops::Placeholder(s.WithOpName("xf"), DT_FLOAT,
                               ops::Placeholder::Shape(TensorShape({2, 2})));
  Output xi = ops::Placeholder(s.WithOpName("xi"), DT_INT32,
                               ops::Placeholder::Shape(TensorShape({2, 2})));
  Output ci = ops::Const(s.WithOpName("ci"), 2, {1});
  Output cf = ops::Const(s.WithOpName("cf"), 2.0f, {1});
  Output div_i = ops::Div(s.WithOpName("div_i"), xi, ci);
  Output div_f = ops::Div(s.WithOpName("div_f"), xf, cf);
  Output realdiv = ops::RealDiv(s.WithOpName("realdiv"), xf, cf);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  item.fetch = {"div_f", "div_i", "realdiv"};
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(8, output.node_size());
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    const string& name = node.name();
    if (name == "div_i") {
      // Integer division is not rewritten.
      EXPECT_EQ("Div", node.op());
      EXPECT_EQ("xi", node.input(0));
      EXPECT_EQ("ci", node.input(1));
    } else if (name == "div_f") {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("xf", node.input(0));
      EXPECT_EQ("ConstantFolding/div_f_recip", node.input(1));
    } else if (name == "realdiv") {
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ("xf", node.input(0));
      EXPECT_EQ("ConstantFolding/realdiv_recip", node.input(1));
    } else if (name == "ConstantFolding/div_f_recip") {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("dtype").type());
      TensorProto t = node.attr().at("value").tensor();
      EXPECT_EQ(DT_FLOAT, t.dtype());
      EXPECT_EQ(1, t.tensor_shape().dim_size());
      EXPECT_EQ(1, t.tensor_shape().dim(0).size());
    } else if (name == "ConstantFolding/realdiv_recip") {
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(DT_FLOAT, node.attr().at("dtype").type());
      TensorProto t =
// (continues the "ConstantFolding/realdiv_recip" check started above)
node.attr().at("value").tensor();
      EXPECT_EQ(DT_FLOAT, t.dtype());
      EXPECT_EQ(1, t.tensor_shape().dim_size());
      EXPECT_EQ(1, t.tensor_shape().dim(0).size());
    }
  }
  // Both materialized reciprocal constants must evaluate to 0.5 (== cf_half).
  std::vector<string> fetch = {"cf_half"};
  auto tensor_expected = EvaluateNodes(item.graph, fetch);
  EXPECT_EQ(fetch.size(), tensor_expected.size());
  fetch = {"ConstantFolding/div_f_recip", "ConstantFolding/realdiv_recip"};
  auto tensors = EvaluateNodes(output, fetch);
  EXPECT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < fetch.size(); i++) {
    test::ExpectTensorEqual<float>(tensor_expected[0], tensors[i]);
  }
}

// Mul-by-zero folding when operand shapes are fully known, partially known
// (rank known, dims unknown) or fully unknown.  Expected outcome per pair:
// Const when the output shape is statically known, Identity when both sides
// are only partially known, and an untouched Mul when either side is
// fully unknown.
TEST_F(ConstantFoldingTest, NeutralElement_PartialShape_UnknownOutputShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output x_known = ops::Placeholder(s.WithOpName("x_known"), DT_FLOAT,
                                    ops::Placeholder::Shape(TensorShape({2, 2})));
  Output x_partially_known =
      ops::Placeholder(s.WithOpName("x_partially_unknown"), DT_FLOAT,
                       ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
  Output x_unknown = ops::Placeholder(s.WithOpName("x_unknown"), DT_FLOAT);
  Output zeros_known = ops::ZerosLike(s.WithOpName("zeros_known"), x_known);
  Output zeros_partially_known =
      ops::ZerosLike(s.WithOpName("zeros_partially_known"), x_partially_known);
  Output zeros_unknown =
      ops::ZerosLike(s.WithOpName("zeros_unknown"), x_unknown);
  // Build the 3x3 cross product of x inputs and zeros inputs, and record
  // the transformation expected for each resulting mul node.
  int count = 0;
  std::vector<Output> muls;
  std::unordered_set<string> not_converted;
  std::unordered_set<string> to_const;
  std::unordered_set<string> to_identity;
  for (const auto* x : {&x_known, &x_partially_known, &x_unknown}) {
    for (const auto* zeros :
         {&zeros_known, &zeros_partially_known, &zeros_unknown}) {
      const string name = strings::StrCat("mul_", count++);
      muls.push_back(ops::Mul(s.WithOpName(name), *x, *zeros));
      if (x == &x_partially_known && zeros == &zeros_partially_known) {
        to_identity.insert(name);
      } else if (x == &x_unknown || zeros == &zeros_unknown) {
        not_converted.insert(name);
      } else {
        to_const.insert(name);
      }
    }
  }
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(15, output.node_size());
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    const string& name = node.name();
    if (to_const.count(name) > 0) {
      EXPECT_EQ("Const", node.op()) << node.name();
    } else if (to_identity.count(name) > 0) {
      EXPECT_EQ("Identity", node.op()) << node.name();
    } else if (not_converted.count(name) > 0) {
      EXPECT_EQ("Mul", node.op()) << node.name();
    }
  }
  // Spot-check one node from each category numerically.
  const std::vector<string> fetch = {"mul_0", "mul_4", "mul_8"};
  auto x_known_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto x_partially_unknown_t =
      GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 4}));
  auto x_unknown_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 7}));
  auto expected_tensors =
      EvaluateNodes(item.graph, fetch,
                    {{"x_known", x_known_t},
                     {"x_partially_unknown", x_partially_unknown_t},
                     {"x_unknown", x_unknown_t}});
  EXPECT_EQ(fetch.size(), expected_tensors.size());
  auto tensors = EvaluateNodes(output, fetch,
                               {{"x_known", x_known_t},
                                {"x_partially_unknown", x_partially_unknown_t},
                                {"x_unknown", x_unknown_t}});
  EXPECT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < tensors.size(); i++)
    test::ExpectTensorNear<float>(expected_tensors[i], tensors[i], 1e-5);
}

// Mul-by-zero folding where operand shapes are unknown but the output shape
// can be deduced from a known-shape sibling feeding the same AddN.
TEST_F(ConstantFoldingTest, NeutralElement_PartialShape_KnownOutputShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output known_shape = ops::Const(s.WithOpName("known_shape"), 0.0f, {2, 2});
  Output x_partially_known =
      ops::Placeholder(s.WithOpName("x_partially_unknown"), DT_FLOAT,
                       ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
  Output x_unknown = ops::Placeholder(s.WithOpName("x_unknown"), DT_FLOAT);
  Output zeros_partially_known =
      ops::ZerosLike(s.WithOpName("zeros_partially_known"), x_partially_known);
  Output zeros_unknown =
// (continues the zeros_unknown initializer started above)
ops::ZerosLike(s.WithOpName("zeros_unknown"), x_unknown);
  // All four mul combinations are expected to fold to constants, since the
  // AddN sibling "known_shape" fixes the output shape to 2x2.
  std::vector<Output> muls_deduced_output_shape;
  std::unordered_set<string> to_const;
  int count = 0;
  for (const auto& x : {x_partially_known, x_unknown}) {
    for (const auto& zeros : {zeros_partially_known, zeros_unknown}) {
      const string name = strings::StrCat("mul_", count++);
      muls_deduced_output_shape.push_back(
          ops::Mul(s.WithOpName(name), x, zeros));
      to_const.insert(name);
    }
  }
  muls_deduced_output_shape.push_back(known_shape);
  Output addn1 = ops::AddN(s.WithOpName("addn1"), muls_deduced_output_shape);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(10, output.node_size());
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    const string& name = node.name();
    if (to_const.count(name) > 0) {
      // Folded constants keep their former data inputs as control inputs.
      EXPECT_EQ("Const", node.op()) << node.name();
      EXPECT_EQ(2, node.input_size());
      EXPECT_TRUE(IsControlInput(node.input(0)));
      EXPECT_TRUE(IsControlInput(node.input(1)));
    }
  }
  const std::vector<string> fetch = {"addn1"};
  auto x_partially_unknown_t =
      GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto x_unknown_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto expected_tensors =
      EvaluateNodes(item.graph, fetch,
                    {{"x_partially_unknown", x_partially_unknown_t},
                     {"x_unknown", x_unknown_t}});
  EXPECT_EQ(1, expected_tensors.size());
  auto tensors = EvaluateNodes(output, fetch,
                               {{"x_partially_unknown", x_partially_unknown_t},
                                {"x_unknown", x_unknown_t}});
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(expected_tensors[0], tensors[0], 1e-5);
}

// Folds Mul-of-constants for a range of dtypes and checks each result is a
// single splat value (10 * 10) with the right shape and value field.
TEST_F(ConstantFoldingTest, CreateConstNodes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#define MAKE_TEST_GRAPH(TYPE)                                               \
  Output TYPE##_const =                                                     \
      ops::Const(s.WithOpName(#TYPE "_const"), static_cast<TYPE>(10), {5}); \
  Output TYPE##_mul =                                                       \
      ops::Mul(s.WithOpName(#TYPE "_mul"), TYPE##_const, TYPE##_const);     \
  Output TYPE##_id = ops::Identity(s.WithOpName(#TYPE "_id"), TYPE##_mul)

  MAKE_TEST_GRAPH(float);
  MAKE_TEST_GRAPH(double);
  MAKE_TEST_GRAPH(int64_t);
  MAKE_TEST_GRAPH(int32);
  MAKE_TEST_GRAPH(int16);
  MAKE_TEST_GRAPH(int8);
  MAKE_TEST_GRAPH(uint8);
#undef MAKE_TEST_GRAPH

  Output bool_const = ops::Const(s.WithOpName("bool_const"), true, {5});
  Output bool_and =
      ops::LogicalAnd(s.WithOpName("bool_and"), bool_const, bool_const);
  Output bool_id = ops::Identity(s.WithOpName("bool_id"), bool_and);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  EXPECT_EQ(24, output.node_size());
  for (const NodeDef& node : output.node()) {
#define CHECK_RESULT(TYPE, FIELD)                                             \
  if (node.name() == #TYPE "_mul") {                                          \
    EXPECT_EQ(5,                                                              \
              node.attr().at("value").tensor().tensor_shape().dim(0).size()); \
    EXPECT_EQ(1, node.attr().at("value").tensor().FIELD##_val_size());        \
    EXPECT_EQ(10 * 10, node.attr().at("value").tensor().FIELD##_val(0));      \
  }

    CHECK_RESULT(float, float);
    CHECK_RESULT(double, double);
    CHECK_RESULT(int64, int64);
    CHECK_RESULT(int32, int);
    CHECK_RESULT(int16, int);
    CHECK_RESULT(int8, int);
    CHECK_RESULT(uint8, int);
#undef CHECK_RESULT

    if (node.name() == "bool_and") {
      EXPECT_EQ(5,
                node.attr().at("value").tensor().tensor_shape().dim(0).size());
      EXPECT_EQ(1, node.attr().at("value").tensor().bool_val_size());
      EXPECT_EQ(true && true, node.attr().at("value").tensor().bool_val(0));
    }
  }
}

// Folds a node with multiple outputs (Unique) so both downstream consumers
// become constants.
TEST_F(ConstantFoldingTest, FoldingNodeWithTwoOutputs) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 10, {5});
  auto b = ops::Unique(s.WithOpName("b"), {a});
  Output c = ops::Identity(s.WithOpName("c"), {b.y});
  Output d = ops::Identity(s.WithOpName("d"), {b.idx});
  Output e = ops::Identity(s.WithOpName("e"), {c});
  Output f = ops::Identity(s.WithOpName("f"),
// (continues the Identity "f" construction started above)
{d});
  GrapplerItem item;
  item.fetch.push_back("e");
  item.fetch.push_back("f");
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  // Everything upstream of the fetches collapses into two constants.
  EXPECT_EQ(2, output.node_size());
  const NodeDef& new_c = output.node(0);
  EXPECT_EQ("e", new_c.name());
  EXPECT_EQ("Const", new_c.op());
  const NodeDef& new_d = output.node(1);
  EXPECT_EQ("f", new_d.name());
  EXPECT_EQ("Const", new_d.op());
  std::vector<string> fetch = {"e", "f"};
  auto tensors_expected = EvaluateNodes(item.graph, fetch);
  auto tensors = EvaluateNodes(output, fetch);
  EXPECT_EQ(fetch.size(), tensors_expected.size());
  EXPECT_EQ(fetch.size(), tensors.size());
  for (int i = 0; i < fetch.size(); i++) {
    test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]);
  }
}

// A folded constant must inherit the control dependencies of the chain it
// replaces (here ^p1 and ^p2 end up on the folded "i3").
TEST_F(ConstantFoldingTest, ControlDependencies) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output dflt = ops::Const(scope.WithOpName("dflt"), 3.14f, {1});
  Output p1 = ops::PlaceholderWithDefault(scope.WithOpName("p1"), dflt, {1});
  Output p2 = ops::PlaceholderWithDefault(scope.WithOpName("p2"), dflt, {1});
  Output c =
      ops::Const(scope.WithOpName("c").WithControlDependencies(p1), 10, {3});
  Output i1 = ops::Identity(scope.WithOpName("i1"), {c});
  Output i2 =
      ops::Identity(scope.WithOpName("i2").WithControlDependencies(p2), {i1});
  Output i3 = ops::Identity(scope.WithOpName("i3"), {i2});
  GrapplerItem item;
  item.fetch.push_back("i3");
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  std::vector<string> expected_nodes = {"dflt", "p1", "p2", "i3"};
  EXPECT_EQ(output.node_size(), expected_nodes.size());
  int i = 0;
  int found = 0;
  for (const auto& node : output.node()) {
    EXPECT_EQ(expected_nodes[i], output.node(i).name());
    i++;
    if (node.name() == "i3") {
      EXPECT_EQ("Const", node.op());
      ++found;
      auto folded = EvaluateNodes(output, {"i3"});
      auto expected = EvaluateNodes(item.graph, {"i3"});
      EXPECT_EQ(1, expected.size());
      EXPECT_EQ(1, folded.size());
      test::ExpectTensorEqual<int>(folded[0], expected[0]);
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("^p1", node.input(0));
      EXPECT_EQ("^p2", node.input(1));
    }
  }
  EXPECT_EQ(1, found);
}

// Same graph as above but with no fetch nodes: all nodes are kept, and the
// foldable identities i1/i2 are rewritten to Consts in place, each carrying
// the control deps of what it replaced.
TEST_F(ConstantFoldingTest, ControlDependenciesEmptyFetch) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output dflt = ops::Const(scope.WithOpName("dflt"), 3.14f, {1});
  Output p1 = ops::PlaceholderWithDefault(scope.WithOpName("p1"), dflt, {1});
  Output p2 = ops::PlaceholderWithDefault(scope.WithOpName("p2"), dflt, {1});
  Output c =
      ops::Const(scope.WithOpName("c").WithControlDependencies(p1), 10, {3});
  Output i1 = ops::Identity(scope.WithOpName("i1"), {c});
  Output i2 =
      ops::Identity(scope.WithOpName("i2").WithControlDependencies(p2), {i1});
  // Note: the C++ variable is i3 but the node is deliberately named "e".
  Output i3 = ops::Identity(scope.WithOpName("e"), {i2});
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  std::vector<string> expected_nodes = {"dflt", "p1", "p2", "c",
                                        "i1",   "i2", "e"};
  EXPECT_EQ(output.node_size(), expected_nodes.size());
  int i = 0;
  int found = 0;
  for (const auto& node : output.node()) {
    EXPECT_EQ(expected_nodes[i], output.node(i).name());
    i++;
    if (node.name() == "i1") {
      EXPECT_EQ("Const", node.op());
      ++found;
      auto folded = EvaluateNodes(output, {"i1"});
      auto expected = EvaluateNodes(item.graph, {"i1"});
      EXPECT_EQ(1, expected.size());
      EXPECT_EQ(1, folded.size());
      test::ExpectTensorEqual<int>(folded[0], expected[0]);
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^p1", node.input(0));
    }
    if (node.name() == "i2") {
      EXPECT_EQ("Const", node.op());
      ++found;
      auto folded = EvaluateNodes(output, {"i2"});
      auto expected = EvaluateNodes(item.graph, {"i2"});
      EXPECT_EQ(1, expected.size());
      EXPECT_EQ(1, folded.size());
// (continues the "i2" check of ControlDependenciesEmptyFetch started above)
test::ExpectTensorEqual<int>(folded[0], expected[0]);
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("^p1", node.input(0));
      EXPECT_EQ("^p2", node.input(1));
    }
  }
  EXPECT_EQ(2, found);
}

// Duplicate control dependencies accumulated while folding (here ^p1 both
// explicit on i1 and inherited from c) must be deduplicated on the result.
TEST_F(ConstantFoldingTest, ControlDependenciesDeduplicate) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output dflt = ops::Const(scope.WithOpName("dflt"), 3.14f, {1});
  Output p1 = ops::PlaceholderWithDefault(scope.WithOpName("p1"), dflt, {1});
  Output p2 = ops::PlaceholderWithDefault(scope.WithOpName("p2"), dflt, {1});
  Output c =
      ops::Const(scope.WithOpName("c").WithControlDependencies(p1), 10, {3});
  Output i1 = ops::Identity(scope.WithOpName("i1")
                                .WithControlDependencies(p2)
                                .WithControlDependencies(p1),
                            {c});
  Output i2 = ops::Identity(scope.WithOpName("i2"), {i1});
  GrapplerItem item;
  item.fetch.push_back("i2");
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  std::vector<string> expected_nodes = {"dflt", "p1", "p2", "i2"};
  EXPECT_EQ(output.node_size(), expected_nodes.size());
  int i = 0;
  for (const auto& node : output.node()) {
    EXPECT_EQ(expected_nodes[i], output.node(i).name());
    i++;
    if (node.name() == "i2") {
      EXPECT_EQ("Const", node.op());
      // Exactly one ^p1 and one ^p2 — no duplicates.
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("^p1", node.input(0));
      EXPECT_EQ("^p2", node.input(1));
    }
  }
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]);
}

// Folds ops with a variable number of outputs: DynamicPartition (4 outputs)
// and ConcatOffset (one output per concatenated input).
TEST_F(ConstantFoldingTest, VariableNumberOfOutputs) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output input = ops::Const(scope.WithOpName("in0"), 314, {3, 4, 5});
  Output indices = ops::Const(scope.WithOpName("indices"), 1, {3, 4});
  int num_partitions = 4;
  ops::DynamicPartition part(scope.WithOpName("partition"), input, indices,
                             num_partitions);
  std::vector<string> outputs;
  for (int i = 0; i < num_partitions; ++i) {
    string part_out_name = strings::StrCat("part_out", i);
    ops::Identity partition_out(scope.WithOpName(part_out_name),
                                {part.outputs[i]});
    outputs.push_back(part_out_name);
  }
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));

  // Build the ConcatOffset part of the graph with NodeDefBuilder, since it
  // has no C++ ops wrapper here.
  Tensor initial_val(DT_INT32, TensorShape({3}));
  test::FillIota<int>(&initial_val, 7);
  for (int i = 1; i < 5; ++i) {
    TF_CHECK_OK(NodeDefBuilder(strings::StrCat("in", i), "Const")
                    .Attr("dtype", DT_INT32)
                    .Attr("value", initial_val)
                    .Finalize(item.graph.add_node()));
  }
  Tensor concat_dim(DT_INT32, TensorShape({}));
  test::FillIota<int>(&concat_dim, 0);
  TF_CHECK_OK(NodeDefBuilder("concat_dim", "Const")
                  .Attr("dtype", DT_INT32)
                  .Attr("value", concat_dim)
                  .Finalize(item.graph.add_node()));
  TF_CHECK_OK(NodeDefBuilder("concat_offsets", "ConcatOffset")
                  .Input("concat_dim", 0, DT_INT32)
                  .Input({NodeDefBuilder::NodeOut("in1", 0, DT_INT32),
                          NodeDefBuilder::NodeOut("in2", 0, DT_INT32),
                          NodeDefBuilder::NodeOut("in3", 0, DT_INT32),
                          NodeDefBuilder::NodeOut("in4", 0, DT_INT32)})
                  .Finalize(item.graph.add_node()));
  for (int i = 0; i < 4; ++i) {
    string concat_offset_out_name = strings::StrCat("concat_offset_out", i);
    TF_CHECK_OK(NodeDefBuilder(concat_offset_out_name, "Identity")
                    .Attr("T", DT_INT32)
                    .Input("concat_offsets", i, DT_INT32)
                    .Finalize(item.graph.add_node()));
    outputs.push_back(concat_offset_out_name);
  }

  item.fetch = outputs;
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);

  // All 4 partition outputs and all 4 concat offsets must fold to Consts.
  int constant_folded = 0;
  for (const auto& node : output.node()) {
    if (node.name().find("part_out") != string::npos ||
        node.name().find("concat_offset_out") != string::npos) {
      ++constant_folded;
      EXPECT_EQ("Const", node.op());
    }
  }
  EXPECT_EQ(8, constant_folded);

  auto expected = EvaluateNodes(item.graph, outputs);
  auto optimized = EvaluateNodes(output, outputs);
  ASSERT_EQ(expected.size(), optimized.size());
// (continues the output comparison of VariableNumberOfOutputs started above)
for (int i = 0; i < expected.size(); ++i) {
    test::ExpectTensorEqual<int>(expected[i], optimized[i]);
  }
}

// Rank/Shape/Size of variables with statically known shapes are materialized
// and folded all the way into the fetched product p2.
TEST_F(ConstantFoldingTest, ShapeMaterialization) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output v1 = ops::Variable(scope.WithOpName("v1"), {3}, DT_FLOAT);
  Output v2 = ops::Variable(scope.WithOpName("v2"), {5, 7}, DT_FLOAT);
  Output v3 = ops::Variable(scope.WithOpName("v3"), {11, 13}, DT_FLOAT);
  Output rank = ops::Rank(scope.WithOpName("rank"), v1);
  Output shape = ops::Shape(scope.WithOpName("shape"), v2);
  Output size = ops::Size(scope.WithOpName("size"), v3);
  Output p1 = ops::Multiply(scope.WithOpName("p1"), size, rank);
  Output p2 = ops::Multiply(scope.WithOpName("p2"), p1, shape);
  GrapplerItem item;
  item.fetch.push_back("p2");
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  int found = 0;
  for (const auto& node : output.node()) {
    if (node.name() == "p2") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("^v3", node.input(0));
      EXPECT_EQ("^v1", node.input(1));
      EXPECT_EQ("^v2", node.input(2));
      Tensor value;
      CHECK(value.FromProto(node.attr().at("value").tensor()));
      // size(v3)=143, rank(v1)=1, shape(v2)={5,7} -> 143*1*{5,7}={715,1001}.
      EXPECT_EQ(715, value.flat<int>()(0));
      EXPECT_EQ(1001, value.flat<int>()(1));
    }
  }
  EXPECT_EQ(1, found);
  auto v1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3}));
  auto v2_t = GenerateRandomTensor<DT_FLOAT>({5, 7});
  auto v3_t = GenerateRandomTensor<DT_FLOAT>({11, 13});
  auto tensors_expected = EvaluateNodes(
      item.graph, item.fetch, {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(1, item.fetch.size());
  auto tensors = EvaluateNodes(output, item.fetch,
                               {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(1, item.fetch.size());
  test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]);
}

// Same graph with no fetch: Rank/Shape/Size are each replaced in place by a
// Const holding the materialized value, anchored by a control dep on the
// variable they inspected.
TEST_F(ConstantFoldingTest, ShapeMaterializationEmptyFetch) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output v1 = ops::Variable(scope.WithOpName("v1"), {3}, DT_FLOAT);
  Output v2 = ops::Variable(scope.WithOpName("v2"), {5, 7}, DT_FLOAT);
  Output v3 = ops::Variable(scope.WithOpName("v3"), {11, 13}, DT_FLOAT);
  Output rank = ops::Rank(scope.WithOpName("rank"), v1);
  Output shape = ops::Shape(scope.WithOpName("shape"), v2);
  Output size = ops::Size(scope.WithOpName("size"), v3);
  Output p1 = ops::Multiply(scope.WithOpName("p1"), size, rank);
  Output p2 = ops::Multiply(scope.WithOpName("p2"), p1, shape);
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  int found = 0;
  for (const auto& node : output.node()) {
    if (node.name() == "size") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^v3", node.input(0));
      Tensor value;
      CHECK(value.FromProto(node.attr().at("value").tensor()));
      EXPECT_EQ(11 * 13, value.flat<int>()(0));
    } else if (node.name() == "rank") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^v1", node.input(0));
      Tensor value;
      CHECK(value.FromProto(node.attr().at("value").tensor()));
      EXPECT_EQ(1, value.flat<int>()(0));
    } else if (node.name() == "shape") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^v2", node.input(0));
      Tensor value;
      CHECK(value.FromProto(node.attr().at("value").tensor()));
      EXPECT_EQ(5, value.flat<int>()(0));
      EXPECT_EQ(7, value.flat<int>()(1));
    }
  }
  EXPECT_EQ(3, found);
  auto v1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3}));
  auto v2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 7}));
  auto v3_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({11, 13}));
  std::vector<string> fetch_nodes = {"p2"};
  auto tensors_expected = EvaluateNodes(
      item.graph, fetch_nodes, {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(1, tensors_expected.size());
  auto tensors = EvaluateNodes(output, fetch_nodes,
// (continues the EvaluateNodes call of ShapeMaterializationEmptyFetch)
{{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]);
}

// ShapeN materialization: only output 2 (v3's fully known {4, 6} shape) gets
// a materialized "s-matshapes-2" constant; consumers of outputs 0 and 1 keep
// reading from the ShapeN node itself.
TEST_F(ConstantFoldingTest, ShapeMaterializationShapeN) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output v1 = ops::Variable(scope.WithOpName("v1"), {3, -1}, DT_FLOAT);
  Output v2 = ops::Variable(scope.WithOpName("v2"), {}, DT_FLOAT);
  Output v3 = ops::Variable(scope.WithOpName("v3"), {4, 6}, DT_FLOAT);
  auto s = ops::ShapeN(scope.WithOpName("s"), {v1, v2, v3});
  Output i1a = ops::Identity(scope.WithOpName("i1a"), s[0]);
  Output i1b = ops::Identity(scope.WithOpName("i1b"), s[0]);
  Output i2a = ops::Identity(scope.WithOpName("i2a"), s[1]);
  Output i2b = ops::Identity(scope.WithOpName("i2b"), s[1]);
  Output i2c = ops::Identity(scope.WithOpName("i2c"), s[1]);
  Output i3a = ops::Identity(scope.WithOpName("i3a"), s[2]);
  Output i3b = ops::Identity(scope.WithOpName("i3b"), s[2]);
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  int found = 0;
  for (const auto& node : output.node()) {
    // No constants may have been materialized for outputs 0 and 1.
    EXPECT_NE(AddPrefixToNodeName("s-matshapes-0", kConstantFoldingConst),
              node.name());
    EXPECT_NE(AddPrefixToNodeName("s-matshapes-1", kConstantFoldingConst),
              node.name());
    if (node.name() == "i1a" || node.name() == "i1b") {
      ++found;
      EXPECT_EQ("s", node.input(0));
    }
    if (node.name() == "i2a" || node.name() == "i2b" || node.name() == "i2c") {
      ++found;
      EXPECT_EQ("s:1", node.input(0));
    }
    if (node.name() == "i3a" || node.name() == "i3b") {
      ++found;
      EXPECT_EQ(AddPrefixToNodeName("s-matshapes-2", kConstantFoldingConst),
                node.input(0));
    }
    if (node.name() == "s") {
      // The ShapeN node itself survives with its original inputs.
      ++found;
      EXPECT_EQ("ShapeN", node.op());
      EXPECT_EQ("v1", node.input(0));
      EXPECT_EQ("v2", node.input(1));
      EXPECT_EQ("v3", node.input(2));
    }
    if (node.name() ==
        AddPrefixToNodeName("s-matshapes-2", kConstantFoldingConst)) {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("^s", node.input(0));
      Tensor value;
      CHECK(value.FromProto(node.attr().at("value").tensor()));
      EXPECT_EQ(4, value.flat<int>()(0));
      EXPECT_EQ(6, value.flat<int>()(1));
    }
  }
  EXPECT_EQ(9, found);
  auto v1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 4}));
  auto v2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 6}));
  auto v3_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 6}));
  const std::vector<string> fetch_nodes = {"i1a", "i1b", "i2a", "i2b",
                                           "i2c", "i3a", "i3b"};
  auto tensors_expected = EvaluateNodes(
      item.graph, fetch_nodes, {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(fetch_nodes.size(), tensors_expected.size());
  auto tensors = EvaluateNodes(output, fetch_nodes,
                               {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}});
  EXPECT_EQ(fetch_nodes.size(), tensors.size());
  for (int i = 0; i < fetch_nodes.size(); i++)
    test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]);
}

// ShapeN materialization through a multi-output IdentityN: ib (fed by the
// known v2 shape) folds to a Const, while ia keeps flowing through id_n.
TEST_F(ConstantFoldingTest, ShapeMaterializationShapeN_MultipleOutputs) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output v1 = ops::Variable(scope.WithOpName("v1"), {3, -1}, DT_FLOAT);
  Output v2 = ops::Variable(scope.WithOpName("v2"), {4, 6}, DT_FLOAT);
  auto s = ops::ShapeN(scope.WithOpName("s"), {v1, v2});
  auto id_n = ops::IdentityN(scope.WithOpName("id_n"), {s[0], s[1]});
  Output ia = ops::Identity(scope.WithOpName("ia"), id_n[0]);
  Output ib = ops::Identity(scope.WithOpName("ib"), id_n[1]);
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  item.fetch.push_back("ia");
  item.fetch.push_back("ib");
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  int found = 0;
  for (const auto& node : output.node()) {
    EXPECT_NE(AddPrefixToNodeName("s-matshapes-0", kConstantFoldingConst),
              node.name());
    if (node.name() == "s") {
      ++found;
      EXPECT_EQ("ShapeN", node.op());
      EXPECT_EQ("v1", node.input(0));
      EXPECT_EQ("v2", node.input(1));
    }
    if (node.name() == "id_n") {
      ++found;
// (continues the "id_n" check of ShapeN_MultipleOutputs started above)
EXPECT_EQ("IdentityN", node.op());
      EXPECT_EQ("s", node.input(0));
      EXPECT_EQ(AddPrefixToNodeName("s-matshapes-1", kConstantFoldingConst),
                node.input(1));
    }
    if (node.name() == "ia") {
      ++found;
      EXPECT_EQ("id_n", node.input(0));
    }
    if (node.name() == "ib") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ("^s", node.input(0));
      EXPECT_EQ("^id_n", node.input(1));
    }
  }
  EXPECT_EQ(4, found);
  auto v1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 4}));
  auto v2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 6}));
  auto tensors_expected =
      EvaluateNodes(item.graph, item.fetch, {{"v1", v1_t}, {"v2", v2_t}});
  EXPECT_EQ(2, tensors_expected.size());
  auto tensors =
      EvaluateNodes(output, item.fetch, {{"v1", v1_t}, {"v2", v2_t}});
  EXPECT_EQ(2, tensors.size());
  for (int i = 0; i < tensors.size(); i++)
    test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]);
}

// Folding around Switch nodes with no fetch: Rank/Size downstream of a
// Switch fold to Consts anchored on the switch's control node (or on the
// identity "i"), the statically-false branch consumer i2 folds to a Const
// with no inputs, and the never-taken branch i3 stays an Identity.
TEST_F(ConstantFoldingTest, SwitchNodesEmptyFetch) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
  ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
  ops::Switch s1(scope.WithOpName("switch"), v_in, v_ctrl);
  ops::Rank rank(scope.WithOpName("rank"), s1.output_false);
  ops::Identity i(scope.WithOpName("i"), s1.output_true);
  ops::Size size(scope.WithOpName("size"), i);
  ops::Square p1(scope.WithOpName("p1"), rank);
  ops::Square p2(scope.WithOpName("p2"), size);
  ops::Merge m(scope.WithOpName("m"), {p1.y, p2.y});
  // Second switch with a constant (false) predicate.
  Output predicate =
      ops::Const(scope.WithOpName("false"), false, TensorShape({}));
  Output constant =
      ops::Const(scope.WithOpName("constant"), 1.0f, TensorShape({1}));
  ops::Switch s2(scope.WithOpName("switch2"), constant, predicate);
  ops::Identity statically_known(scope.WithOpName("i2"), s2.output_false);
  ops::Identity never_generated(scope.WithOpName("i3"), s2.output_true);
  ops::Merge m2(scope.WithOpName("m2"),
                {statically_known.output, never_generated.output});
  GrapplerItem item;
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  std::set<string> present_nodes = {"v_in",     "v_ctrl",
                                    "switch",   "i",
                                    "p1",       "p2",
                                    "m",        "false",
                                    "constant", "switch2",
                                    "i2",       "i3",
                                    "m2",       "ConstantFoldingCtrl/switch_0",
                                    "rank",     "size"};
  std::set<string> not_present_nodes = {"ConstantFolding/switch2-0"};
  EXPECT_EQ(present_nodes.size(), output.node_size());
  int found = 0;
  for (const auto& node : output.node()) {
    EXPECT_TRUE(present_nodes.find(node.name()) != present_nodes.end())
        << node.name();
    EXPECT_TRUE(not_present_nodes.find(node.name()) == not_present_nodes.end())
        << node.name();
    present_nodes.erase(node.name());
    not_present_nodes.erase(node.name());
    if (node.name() == "rank") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^ConstantFoldingCtrl/switch_0", node.input(0));
    }
    if (node.name() == "size") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("^i", node.input(0));
    }
    if (node.name() == "i2") {
      ++found;
      EXPECT_EQ("Const", node.op());
      EXPECT_EQ(0, node.input_size());
    }
    if (node.name() == "i3") {
      ++found;
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("switch2:1", node.input(0));
    }
  }
  EXPECT_EQ(4, found);
  // Check numerics with the runtime predicate both true and false.
  auto v_in_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3}));
  Tensor v_ctrl_t(DT_BOOL, TensorShape({}));
  v_ctrl_t.flat<bool>()(0) = true;
  std::vector<string> fetch_nodes = {"m", "m2"};
  auto tensors_expected = EvaluateNodes(
      item.graph, fetch_nodes, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
  EXPECT_EQ(2, tensors_expected.size());
  auto tensors = EvaluateNodes(output, fetch_nodes,
                               {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
  EXPECT_EQ(2, tensors.size());
  test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]);
  test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-5);
  v_ctrl_t.flat<bool>()(0) = false;
  tensors_expected = EvaluateNodes(item.graph, fetch_nodes,
                                   {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
  EXPECT_EQ(2, tensors_expected.size());
  tensors = EvaluateNodes(output, fetch_nodes,
                          {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
  EXPECT_EQ(2, tensors.size());
  test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]);
  test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-5);
}

// Same Switch graph as above but with "m" and "m2" fetched; continues past
// this chunk.
TEST_F(ConstantFoldingTest, SwitchNodes) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
  ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
  ops::Switch s1(scope.WithOpName("switch"), v_in, v_ctrl);
  ops::Rank rank(scope.WithOpName("rank"), s1.output_false);
  ops::Identity i(scope.WithOpName("i"), s1.output_true);
  ops::Size size(scope.WithOpName("size"), i);
  ops::Square p1(scope.WithOpName("p1"), rank);
  ops::Square p2(scope.WithOpName("p2"), size);
  ops::Merge m(scope.WithOpName("m"), {p1.y, p2.y});
  Output predicate =
      ops::Const(scope.WithOpName("false"), false, TensorShape({}));
  Output constant =
      ops::Const(scope.WithOpName("constant"), 1.0f, TensorShape({1}));
  ops::Switch s2(scope.WithOpName("switch2"), constant, predicate);
  ops::Identity statically_known(scope.WithOpName("i2"), s2.output_false);
  ops::Identity never_generated(scope.WithOpName("i3"), s2.output_true);
  ops::Merge m2(scope.WithOpName("m2"),
                {statically_known.output, never_generated.output});
  GrapplerItem item;
  item.fetch.push_back("m");
  item.fetch.push_back("m2");
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  ConstantFolding optimizer(nullptr);
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
  std::set<string> present_nodes = {"v_in",     "v_ctrl",
                                    "switch",   "i",
                                    "p1",       "p2",
                                    "m",        "false",
                                    "constant", "switch2",
                                    "i2",       "i3",
                                    "m2",       "ConstantFoldingCtrl/switch_0"};
  std::set<string> not_present_nodes = {"rank", "size",
                                        "ConstantFolding/switch2-0"};
  EXPECT_EQ(present_nodes.size(), output.node_size());
  int found = 0;
  for (const auto& node :
output.node()) { EXPECT_TRUE(present_nodes.find(node.name()) != present_nodes.end()); EXPECT_TRUE(not_present_nodes.find(node.name()) == not_present_nodes.end()); present_nodes.erase(node.name()); not_present_nodes.erase(node.name()); if (node.name() == "i2") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(0, node.input_size()); } if (node.name() == "i3") { ++found; EXPECT_EQ("Identity", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("switch2:1", node.input(0)); } } EXPECT_EQ(2, found); auto v_in_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3})); Tensor v_ctrl_t(DT_BOOL, TensorShape({})); v_ctrl_t.flat<bool>()(0) = true; auto tensors_expected = EvaluateNodes( item.graph, item.fetch, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}}); EXPECT_EQ(2, tensors_expected.size()); auto tensors = EvaluateNodes(output, item.fetch, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}}); EXPECT_EQ(2, tensors.size()); test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]); test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-5); v_ctrl_t.flat<bool>()(0) = false; tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}}); EXPECT_EQ(2, tensors_expected.size()); tensors = EvaluateNodes(output, item.fetch, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}}); EXPECT_EQ(2, tensors.size()); test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]); test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-5); } TEST_F(ConstantFoldingTest, SplitRemoval) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), TensorShape({2}), DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), TensorShape({4}), DT_FLOAT); auto split_dim = ops::Const(scope.WithOpName("split_dim"), {0}, {}); ops::Split s1(scope.WithOpName("s1"), split_dim, in1, 1); ops::Split s2(scope.WithOpName("s2"), split_dim, in2, 2); ops::Add out(scope.WithOpName("out"), s1[0], s2[0]); GrapplerItem item; 
item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("split_dim", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("split_dim")}, {}, &want); AddNode("s2", "Split", {"split_dim", "in2"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } TEST_F(ConstantFoldingTest, SplitVRemoval) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), TensorShape({2}), DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), TensorShape({5}), DT_FLOAT); auto split_dim = ops::Const(scope.WithOpName("split_dim"), {0}, {}); auto size_splits1 = ops::Const(scope.WithOpName("size_splits1"), {2}, {1}); auto size_splits2 = ops::Const(scope.WithOpName("size_splits2"), {2, 3}, {2}); ops::SplitV s1(scope.WithOpName("s1"), in1, size_splits1, split_dim, 1); ops::SplitV s2(scope.WithOpName("s2"), in2, size_splits2, split_dim, 2); ops::Add out(scope.WithOpName("out"), s1[0], s2[0]); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", 
{}, {}, &want); AddNode("split_dim", "Const", {}, {}, &want); AddNode("size_splits1", "Const", {}, {}, &want); AddNode("size_splits2", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("size_splits1"), AsControlDependency("split_dim")}, {}, &want); AddNode("s2", "SplitV", {"in2", "size_splits2", "split_dim"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } TEST_F(ConstantFoldingTest, TransposeOnSize1DimsRemoval) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), TensorShape({1, 2, 4, 1}), DT_FLOAT); Output p1 = ops::Const(scope.WithOpName("p1"), {3, 2, 1, 0}, {4}); Output in2 = ops::Variable(scope.WithOpName("in2"), TensorShape({1, 4, 2, 1}), DT_FLOAT); Output p2 = ops::Const(scope.WithOpName("p2"), {3, 1, 2, 0}, {4}); ops::Transpose t1(scope.WithOpName("t1"), in1, p1); ops::Transpose t2(scope.WithOpName("t2").WithControlDependencies({in1}), in2, p2); ops::Add out1(scope.WithOpName("out1"), t1, t2); GrapplerItem item; item.fetch = {"out1"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("p1", "Const", {}, {}, &want); AddNode("p2", "Const", {}, {}, &want); AddNode("t1", "Transpose", {"in1", "p1"}, {}, &want); AddNode("t2", "Identity", {"in2", 
AsControlDependency("in1"), AsControlDependency("p2")}, {}, &want); AddNode("out1", "Add", {"t1", "t2"}, {}, &want); CompareGraphs(want, got); } /* RandomShuffle on a scalar input is a no-op: both s1 and s2 are rewritten to Identity, with s2 keeping its pre-existing control dependency on in1. */ TEST_F(ConstantFoldingTest, RandomShuffleOnScalarRemoval) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), TensorShape({}), DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), TensorShape({}), DT_FLOAT); ops::RandomShuffle s1(scope.WithOpName("s1"), in1); ops::RandomShuffle s2(scope.WithOpName("s2").WithControlDependencies({in1}), in2); ops::Add out1(scope.WithOpName("out1"), s1, s2); ops::Identity out2(scope.WithOpName("out2"), s2); GrapplerItem item; item.fetch = {"out1", "out2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("s1", "Identity", {"in1"}, {}, &want); AddNode("s2", "Identity", {"in2", AsControlDependency("in1")}, {}, &want); AddNode("out1", "Add", {"s1", "s2"}, {}, &want); AddNode("out2", "Identity", {"s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(2, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(2, tensors.size()); for (int i = 0; i < tensors.size(); i++) test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-5); } /* Reverse along axes that are all size-1 (r2, axes {0,3} of a {1,2,4,1} input) becomes Identity with control deps preserved; r1, which reverses real axes, stays a ReverseV2. */ TEST_F(ConstantFoldingTest, ReverseOnSize1DimsRemoval) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), TensorShape({1, 2, 4, 1}), DT_FLOAT); Output a1 = ops::Const(scope.WithOpName("a1"), {3, 2, 1, 0}, {4}); Output 
in2 = ops::Variable(scope.WithOpName("in2"), TensorShape({1, 2, 4, 1}), DT_FLOAT); Output a2 = ops::Const(scope.WithOpName("a2"), {0, 3}, {2}); ops::Reverse r1(scope.WithOpName("r1"), in1, a1); ops::Reverse r2(scope.WithOpName("r2").WithControlDependencies({in1}), in2, a2); ops::Add out1(scope.WithOpName("out1"), r1, r2); GrapplerItem item; item.fetch = {"out1"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("a1", "Const", {}, {}, &want); AddNode("a2", "Const", {}, {}, &want); AddNode("r1", "ReverseV2", {"in1", "a1"}, {}, &want); AddNode("r2", "Identity", {"in2", AsControlDependency("in1"), AsControlDependency("a2")}, {}, &want); AddNode("out1", "Add", {"r1", "r2"}, {}, &want); CompareGraphs(want, got); } /* Two sub-cases: (1) Slice with begin {0,0} and size equal to the full input shape becomes Identity; (2) begin {0,0} with size {-1,-1} (i.e. "to the end") is also a no-op, while a non-zero begin keeps the Slice. */ TEST_F(ConstantFoldingTest, SliceWithSameDimensionRemoval) { { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {3, 5}, DT_FLOAT); auto begin = ops::Const(scope.WithOpName("begin"), {0, 0}, {2}); auto size = ops::Const(scope.WithOpName("size"), {3, 5}, {2}); Output in2 = ops::Variable(scope.WithOpName("in2"), {4, 6}, DT_FLOAT); ops::Slice s1(scope.WithOpName("s1"), in1, begin, size); ops::Slice s2(scope.WithOpName("s2"), in2, begin, size); ops::Add out(scope.WithOpName("out"), s1, s2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("begin", "Const", {}, {}, &want); AddNode("size", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("begin"), 
AsControlDependency("size")}, {}, &want); AddNode("s2", "Slice", {"in2", "begin", "size"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 6})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {3, 5}, DataType::DT_FLOAT); auto begin1 = ops::Const(scope.WithOpName("begin1"), {0, 0}, {2}); auto begin2 = ops::Const(scope.WithOpName("begin2"), {1, 1}, {2}); auto size = ops::Const(scope.WithOpName("size"), {-1, -1}, {2}); Output in2 = ops::Variable(scope.WithOpName("in2"), {4, 6}, DataType::DT_FLOAT); ops::Slice s1(scope.WithOpName("s1"), in1, begin1, size); ops::Slice s2(scope.WithOpName("s2"), in2, begin2, size); ops::Add out(scope.WithOpName("out"), s1, s2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("begin1", "Const", {}, {}, &want); AddNode("begin2", "Const", {}, {}, &want); AddNode("size", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("begin1"), AsControlDependency("size")}, {}, &want); AddNode("s2", "Slice", {"in2", "begin2", "size"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5})); auto in2_t = 
GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 6})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } } /* Two sub-cases: (1) StridedSlice over the leading dims with begin {0,0}, end equal to the input shape, and unit strides becomes Identity; (2) with BeginMask/EndMask/EllipsisMask set, the full-span slice s1 also folds to Identity, while s2 over a different-shape input keeps StridedSlice. */ TEST_F(ConstantFoldingTest, StridedSliceWithSameDimensionRemoval) { { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {3, 5, 2}, DT_FLOAT); auto begin = ops::Const(scope.WithOpName("begin"), {0, 0}, {2}); auto end = ops::Const(scope.WithOpName("end"), {3, 5}, {2}); auto strides = ops::Const(scope.WithOpName("strides"), {1, 1}, {2}); Output in2 = ops::Variable(scope.WithOpName("in2"), {4, 6, 2}, DT_FLOAT); ops::StridedSlice s1(scope.WithOpName("s1"), in1, begin, end, strides); ops::StridedSlice s2(scope.WithOpName("s2"), in2, begin, end, strides); ops::Add out(scope.WithOpName("out"), s1, s2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("begin", "Const", {}, {}, &want); AddNode("end", "Const", {}, {}, &want); AddNode("strides", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("begin"), AsControlDependency("end"), AsControlDependency("strides")}, {}, &want); AddNode("s2", "StridedSlice", {"in2", "begin", "end", "strides"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 2})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 6, 2})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", 
in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {2, 3, 4, 5, 6}, DT_FLOAT); auto begin1 = ops::Const(scope.WithOpName("begin1"), {0, 0, 0}, {3}); auto end1 = ops::Const(scope.WithOpName("end1"), {0, 5, 6}, {3}); auto strides1 = ops::Const(scope.WithOpName("strides1"), {1, 1, 1}, {3}); ops::StridedSlice s1( scope.WithOpName("s1"), in1, begin1, end1, strides1, ops::StridedSlice::Attrs().BeginMask(1).EndMask(1).EllipsisMask(2)); Output in2 = ops::Variable(scope.WithOpName("in2"), {5, 8, 5, 6, 9}, DT_FLOAT); auto begin2 = ops::Const(scope.WithOpName("begin2"), {0, 0, 0, 0, 0}, {5}); auto end2 = ops::Const(scope.WithOpName("end2"), {2, 3, 4, 5, 6}, {5}); auto strides2 = ops::Const(scope.WithOpName("strides2"), {1, 1, 1, 1, 1}, {5}); ops::StridedSlice s2(scope.WithOpName("s2"), in2, begin2, end2, strides2); ops::Add out(scope.WithOpName("out"), s1, s2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("begin1", "Const", {}, {}, &want); AddNode("end1", "Const", {}, {}, &want); AddNode("strides1", "Const", {}, {}, &want); AddNode("s1", "Identity", {"in1", AsControlDependency("begin1"), AsControlDependency("end1"), AsControlDependency("strides1")}, {}, &want); AddNode("begin2", "Const", {}, {}, &want); AddNode("end2", "Const", {}, {}, &want); AddNode("strides2", "Const", {}, {}, &want); AddNode("s2", "StridedSlice", {"in2", "begin2", "end2", "strides2"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, 
&want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3, 4, 5, 6})); auto in2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 8, 5, 6, 9})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } } /* Tile whose multiples are all 1 (t1) is a no-op and becomes Identity with a control dep on multiplies1; t2 with a real multiple stays a Tile. */ TEST_F(ConstantFoldingTest, TileWithMultipliesBeingOne) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {4, 6}, DT_FLOAT); auto in2 = ops::Variable(scope.WithOpName("in2"), {4, 3}, DT_FLOAT); auto multiplies1 = ops::Const(scope.WithOpName("multiplies1"), {1, 1}, {2}); auto multiplies2 = ops::Const(scope.WithOpName("multiplies2"), {1, 2}, {2}); ops::Tile t1(scope.WithOpName("t1"), in1, multiplies1); ops::Tile t2(scope.WithOpName("t2"), in2, multiplies2); ops::Add out(scope.WithOpName("out"), t1, t2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("multiplies1", "Const", {}, {}, &want); AddNode("multiplies2", "Const", {}, {}, &want); AddNode("t1", "Identity", {"in1", AsControlDependency("multiplies1")}, {}, &want); AddNode("t2", "Tile", {"in2", "multiplies2"}, {}, &want); AddNode("out", "Add", {"t1", "t2"}, {}, &want); CompareGraphs(want, got); } /* A ConcatV2 feeding another ConcatV2 on the same axis is merged: c1 disappears and c2 takes {in1, in2, in3, axis} directly. */ TEST_F(ConstantFoldingTest, MergeConcat) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), {4, 6}, DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), {4, 6}, DT_FLOAT); Output in3 = 
ops::Variable(scope.WithOpName("in3"), {4, 6}, DT_FLOAT); Output axis = ops::Const(scope.WithOpName("axis"), 0, {}); ops::Concat c1(scope.WithOpName("c1"), {in1, in2}, axis); ops::Concat c2(scope.WithOpName("c2"), {Output(c1), in3}, axis); GrapplerItem item; item.fetch = {"c2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("in3", "VariableV2", {}, {}, &want); AddNode("axis", "Const", {}, {}, &want); AddNode("c2", "ConcatV2", {"in1", "in2", "in3", "axis"}, {}, &want); CompareGraphs(want, got); } /* When the inner concat feeds the outer one twice, the merge duplicates its inputs in order: c2 ends up with {in1, in2, in3, in1, in2, axis}. */ TEST_F(ConstantFoldingTest, MergeConcat_SameInput) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), {4, 6}, DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), {4, 6}, DT_FLOAT); Output in3 = ops::Variable(scope.WithOpName("in3"), {4, 6}, DT_FLOAT); Output axis = ops::Const(scope.WithOpName("axis"), 0, {}); ops::Concat c1(scope.WithOpName("c1"), {in1, in2}, axis); ops::Concat c2(scope.WithOpName("c2"), {Output(c1), in3, Output(c1)}, axis); GrapplerItem item; item.fetch = {"c2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("in3", "VariableV2", {}, {}, &want); AddNode("axis", "Const", {}, {}, &want); AddNode("c2", "ConcatV2", {"in1", "in2", "in3", "in1", "in2", "axis"}, {}, &want); CompareGraphs(want, got); } /* Concat merge still applies when one of the inner concat's inputs is a scalar-shaped variable (in2 has shape {}). */ TEST_F(ConstantFoldingTest, MergeConcat_ConcatWithConst) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), {2, 6}, DT_FLOAT); Output in2 = 
ops::Variable(scope.WithOpName("in2"), {}, DT_FLOAT); Output in3 = ops::Variable(scope.WithOpName("in3"), {4, 6}, DT_FLOAT); Output axis = ops::Const(scope.WithOpName("axis"), 0, {}); ops::Concat c1(scope.WithOpName("c1"), {in1, in2}, axis); ops::Concat c2(scope.WithOpName("c2"), {Output(c1), in3}, axis); GrapplerItem item; item.fetch = {"c2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("in3", "VariableV2", {}, {}, &want); AddNode("axis", "Const", {}, {}, &want); AddNode("c2", "ConcatV2", {"in1", "in2", "in3", "axis"}, {}, &want); CompareGraphs(want, got); } /* Nested concats on DIFFERENT axes (axis2 inner vs axis1 outer) must NOT be merged: both c1 and c2 survive unchanged. */ TEST_F(ConstantFoldingTest, MergeConcat_AxisMismatch) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output in1 = ops::Variable(scope.WithOpName("in1"), {2, 5}, DT_FLOAT); Output in2 = ops::Variable(scope.WithOpName("in2"), {}, DT_FLOAT); Output in3 = ops::Variable(scope.WithOpName("in3"), {4, 6}, DT_FLOAT); Output axis1 = ops::Const(scope.WithOpName("axis1"), 0, {}); Output axis2 = ops::Const(scope.WithOpName("axis2"), 1, {}); ops::Concat c1(scope.WithOpName("c1"), {in1, in2}, axis2); ops::Concat c2(scope.WithOpName("c2"), {Output(c1), in3}, axis1); GrapplerItem item; item.fetch = {"c2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); AddNode("in2", "VariableV2", {}, {}, &want); AddNode("in3", "VariableV2", {}, {}, &want); AddNode("axis1", "Const", {}, {}, &want); AddNode("axis2", "Const", {}, {}, &want); AddNode("c1", "ConcatV2", {"in1", "in2", "axis2"}, {}, &want); AddNode("c2", "ConcatV2", {"c1", "in3", "axis1"}, {}, &want); CompareGraphs(want, got); } 
/* The constant inputs of a concat chain (c1..c4) are partially folded into a single generated Const, leaving only that Const, the placeholder, and the axis as inputs of concat2. */ TEST_F(ConstantFoldingTest, MergeConcat_PartialFolding) { Scope scope = Scope::NewRootScope(); Output c1 = ops::Const(scope.WithOpName("c1"), 1.0f, {2, 2}); Output c2 = ops::Const(scope.WithOpName("c2"), 2.0f, {2, 2}); Output c3 = ops::Const(scope.WithOpName("c3"), 3.0f, {2, 2}); Output c4 = ops::Const(scope.WithOpName("c4"), 4.0f, {2, 2}); Output ph = ops::Placeholder(scope.WithOpName("ph"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2}))); Output axis = ops::Const(scope.WithOpName("axis"), 0, {}); ops::Concat concat1(scope.WithOpName("concat1"), {c1, c2, ph}, axis); ops::Concat concat2(scope.WithOpName("concat2"), {c3, c4, Output(concat1)}, axis); GrapplerItem item; item.fetch = {"concat2"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("ConstantFolding/concat2_partial_split_0", "Const", {}, {}, &want); AddNode("axis", "Const", {}, {}, &want); AddNode("ph", "Placeholder", {}, {}, &want); AddNode("concat2", "ConcatV2", {"ConstantFolding/concat2_partial_split_0", "ph", "axis"}, {}, &want); CompareGraphs(want, got); } /* Runs the templated PaddingWithZeroSize helper (defined elsewhere in this file) for both int32 and int64 pad-index types. */ TEST_F(ConstantFoldingTest, PaddingWithZeroSize) { PaddingWithZeroSize<int32>(); PaddingWithZeroSize<int64_t>(); } /* Squeeze on an input with no size-1 dims (s1 on {2,3}) is a no-op -> Identity; s2 on {1,2,3,1} keeps the Squeeze. */ TEST_F(ConstantFoldingTest, SqueezeWithAllDimensionsGreaterThanOne) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); auto in1 = ops::Variable(scope.WithOpName("in1"), {2, 3}, DT_INT32); auto in2 = ops::Variable(scope.WithOpName("in2"), {1, 2, 3, 1}, DT_INT32); ops::Squeeze s1(scope.WithOpName("s1"), in1); ops::Squeeze s2(scope.WithOpName("s2"), in2); ops::Add out(scope.WithOpName("out"), s1, s2); GrapplerItem item; item.fetch = {"out"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("in1", "VariableV2", {}, {}, &want); 
AddNode("in2", "VariableV2", {}, {}, &want); AddNode("s1", "Identity", {"in1"}, {}, &want); AddNode("s2", "Squeeze", {"in2"}, {}, &want); AddNode("out", "Add", {"s1", "s2"}, {}, &want); CompareGraphs(want, got); auto in1_t = GenerateRandomTensor<DT_INT32>(TensorShape({2, 3})); auto in2_t = GenerateRandomTensor<DT_INT32>(TensorShape({1, 2, 3, 1})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors_expected.size()); auto tensors = EvaluateNodes(got, item.fetch, {{"in1", in1_t}, {"in2", in2_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<int>(tensors_expected[0], tensors[0]); } /* Reductions that cannot change their input — Prod over an empty axis set (p, p3) and Prod with keep_dims over size-1 dims (p2) — are rewritten to Identity, keeping control deps on the (folded) axis inputs. */ TEST_F(ConstantFoldingTest, NoOpReduction) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output v = ops::Variable(scope.WithOpName("v"), {3, 5, 7}, DT_FLOAT); Output c = ops::Const(scope.WithOpName("c").WithControlDependencies(v), 0, {0}); Output i = ops::Identity(scope.WithOpName("i"), c); Output p = ops::Prod(scope.WithOpName("p"), v, i); Output s = ops::Square(scope.WithOpName("s"), p); Output v2 = ops::Variable(scope.WithOpName("v2"), {3, 5, 1}, DT_FLOAT); Output c2 = ops::Const(scope.WithOpName("c2").WithControlDependencies(v), 2, {1}); ops::Prod::Attrs attr; attr = attr.KeepDims(true); Output p2 = ops::Prod(scope.WithOpName("p2"), v2, c2, attr); Output a = ops::Placeholder(scope.WithOpName("a"), DT_FLOAT); Output p3 = ops::Prod(scope.WithOpName("p3"), a, i, attr); GrapplerItem item; item.fetch = {"s", "p2", "p3"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); int found = 0; for (const auto& node : output.node()) { if (node.name() == "p") { found++; EXPECT_EQ("Identity", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("v", node.input(0)); EXPECT_EQ("^i", node.input(1)); } else if (node.name() == "p2") { found++; EXPECT_EQ("Identity", node.op()); 
EXPECT_EQ(2, node.input_size()); EXPECT_EQ("v2", node.input(0)); EXPECT_EQ("^c2", node.input(1)); } else if (node.name() == "p3") { found++; EXPECT_EQ("Identity", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("a", node.input(0)); EXPECT_EQ("^i", node.input(1)); } } EXPECT_EQ(3, found); auto v_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7})); auto v2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 1})); auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"v", v_t}, {"v2", v2_t}, {"a", a_t}}); EXPECT_EQ(3, tensors_expected.size()); auto tensors = EvaluateNodes(output, item.fetch, {{"v", v_t}, {"v2", v2_t}, {"a", a_t}}); EXPECT_EQ(3, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-5); test::ExpectTensorNear<float>(tensors_expected[2], tensors[2], 1e-5); } /* Mean over single-element inputs: statically-known axes on a {1,1,1} input fold to Reshape (mean_1, mean_2) or Identity when keep_dims is true (mean_5); a 1-D input (mean_3) and a variable axis (mean_4) keep the Mean op. */ TEST_F(ConstantFoldingTest, SingleElementEmptyAxisReduction) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output input_var_three_dim = ops::Variable( scope.WithOpName("input_var_three_dim"), {1, 1, 1}, DT_FLOAT); Output input_var_one_dim = ops::Variable(scope.WithOpName("input_var_one_dim"), {1}, DT_FLOAT); Output one_axis = ops::Const(scope.WithOpName("one_axis"), {0}, {1}); Output multiple_axes = ops::Const(scope.WithOpName("multiple_axes"), {1, 0}, {2}); Output variable_axis = ops::Variable(scope.WithOpName("input_var_axis"), {1}, DT_INT32); ops::Mean::Attrs attr; attr = attr.KeepDims(false); Output mean_1 = ops::Mean(scope.WithOpName("mean_1"), input_var_three_dim, one_axis, attr.KeepDims(false)); Output mean_2 = ops::Mean(scope.WithOpName("mean_2"), input_var_three_dim, multiple_axes, attr.KeepDims(false)); Output mean_3 = ops::Mean(scope.WithOpName("mean_3"), input_var_one_dim, one_axis, attr.KeepDims(false)); Output mean_4 = ops::Mean(scope.WithOpName("mean_4"), input_var_three_dim, 
variable_axis, attr.KeepDims(false)); Output mean_5 = ops::Mean(scope.WithOpName("mean_5"), input_var_three_dim, multiple_axes, attr.KeepDims(true)); GrapplerItem item; item.fetch = {"mean_1", "mean_2", "mean_3", "mean_4", "mean_5"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); int found = 0; for (const auto& node : output.node()) { if (node.name() == "mean_1" || node.name() == "mean_2") { found++; EXPECT_EQ("Reshape", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("input_var_three_dim", node.input(0)); } else if (node.name() == "mean_3") { found++; EXPECT_EQ("Mean", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("input_var_one_dim", node.input(0)); } else if (node.name() == "mean_4") { found++; EXPECT_EQ("Mean", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("input_var_three_dim", node.input(0)); } else if (node.name() == "mean_5") { found++; EXPECT_EQ("Identity", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("^multiple_axes", node.input(1)); } } EXPECT_EQ(5, found); auto input_var_three_dim_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1})); auto input_var_one_dim_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({1})); Tensor input_var_axis_t(DT_INT32, TensorShape({1})); input_var_axis_t.flat<int32>()(0) = 0; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"input_var_three_dim", input_var_three_dim_t}, {"input_var_one_dim", input_var_one_dim_t}, {"input_var_axis", input_var_axis_t}}); EXPECT_EQ(5, tensors_expected.size()); auto tensors = EvaluateNodes(output, item.fetch, {{"input_var_three_dim", input_var_three_dim_t}, {"input_var_one_dim", input_var_one_dim_t}, {"input_var_axis", input_var_axis_t}}); EXPECT_EQ(5, tensors.size()); for (int i = 0; i < 5; ++i) { test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-5); } } /* Reshape to the input's own (or fully-inferable) shape becomes Identity: r1 keeps an extra control dep on d1, r3 and r4 (shape with a -1 wildcard) fold too; r2 genuinely changes rank ({17,1} -> {17}) and stays a Reshape. */ TEST_F(ConstantFoldingTest, NoOpReshape) { 
tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output d1 = ops::Const(scope.WithOpName("d1"), 3.14f, {17}); Output v1 = ops::Variable(scope.WithOpName("v1"), {17}, DT_FLOAT); Output c1 = ops::Const(scope.WithOpName("c1").WithControlDependencies(v1), 17, {1}); Output i1 = ops::Identity(scope.WithOpName("i1"), c1); Output r1 = ops::Reshape(scope.WithOpName("r1").WithControlDependencies(d1), v1, i1); Output s1 = ops::Square(scope.WithOpName("s1"), r1); Output v3 = ops::Variable(scope.WithOpName("v3"), {5, 5, 5}, DT_FLOAT); Output c3 = ops::Const(scope.WithOpName("c3").WithControlDependencies(v3), 5, {3}); Output i3 = ops::Identity(scope.WithOpName("i3"), c3); Output r3 = ops::Reshape(scope.WithOpName("r3"), v3, i3); Output s3 = ops::Square(scope.WithOpName("s3"), r3); Output v4 = ops::Variable(scope.WithOpName("v4"), {5, 5, 5}, DT_FLOAT); Output c4 = ops::Const(scope.WithOpName("c4").WithControlDependencies(v4), {5, -1, 5}, {3}); Output i4 = ops::Identity(scope.WithOpName("i4"), c4); Output r4 = ops::Reshape(scope.WithOpName("r4"), v4, i4); Output s4 = ops::Square(scope.WithOpName("s4"), r4); Output v2 = ops::Variable(scope.WithOpName("v2"), {17, 1}, DT_FLOAT); Output c2 = ops::Const(scope.WithOpName("c2").WithControlDependencies(v2), 17, {1}); Output r2 = ops::Reshape(scope.WithOpName("r2"), v2, c2); Output s2 = ops::Square(scope.WithOpName("s2"), r2); GrapplerItem item; item.fetch = {"s1", "s2", "s3", "s4"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); int found = 0; for (const auto& node : output.node()) { if (node.name() == "r1") { ++found; EXPECT_EQ("Identity", node.op()); ASSERT_EQ(3, node.input_size()); EXPECT_EQ("v1", node.input(0)); EXPECT_EQ("^i1", node.input(1)); EXPECT_EQ("^d1", node.input(2)); } else if (node.name() == "r3") { ++found; EXPECT_EQ("Identity", node.op()); ASSERT_EQ(2, node.input_size()); 
EXPECT_EQ("v3", node.input(0)); EXPECT_EQ("^i3", node.input(1)); } else if (node.name() == "r4") { ++found; EXPECT_EQ("Identity", node.op()); ASSERT_EQ(2, node.input_size()); EXPECT_EQ("v4", node.input(0)); EXPECT_EQ("^i4", node.input(1)); } else if (node.name() == "r2") { ++found; EXPECT_EQ("Reshape", node.op()); ASSERT_EQ(2, node.input_size()); EXPECT_EQ("v2", node.input(0)); EXPECT_EQ("c2", node.input(1)); } } EXPECT_EQ(4, found); auto v1_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({17})); auto v2_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({17, 1})); auto v3_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 5, 5})); auto v4_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({5, 5, 5})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}, {"v4", v4_t}}); EXPECT_EQ(4, tensors_expected.size()); auto tensors = EvaluateNodes(output, item.fetch, {{"v1", v1_t}, {"v2", v2_t}, {"v3", v3_t}, {"v4", v4_t}}); EXPECT_EQ(4, tensors.size()); for (int i = 0; i < tensors.size(); i++) test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-5); } /* Large Const ({1000} elements) feeding two Identity ops; verifies numeric equivalence after folding. NOTE(review): the final size assertion (EXPECT_GT(8000, ...)) continues past this excerpt — presumably bounding the serialized graph size; confirm against the full file. */ TEST_F(ConstantFoldingTest, Packing) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output c = ops::Const(scope.WithOpName("c"), 3.14f, {1000}); Output i1 = ops::Identity(scope.WithOpName("i1"), c); Output i2 = ops::Identity(scope.WithOpName("i2"), c); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); const std::vector<string> fetch_nodes = {"i1", "i2"}; auto tensors_expected = EvaluateNodes(item.graph, fetch_nodes); EXPECT_EQ(fetch_nodes.size(), tensors_expected.size()); auto tensors = EvaluateNodes(output, fetch_nodes); EXPECT_EQ(fetch_nodes.size(), tensors.size()); for (int i = 0; i < fetch_nodes.size(); i++) test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-5); EXPECT_GT(8000, 
output.ByteSizeLong()); }

// A constant larger than kMaxConstantSize must not be materialized through an
// Identity during folding (which would duplicate it); the optimized graph's
// size stays close to a single copy of the constant.
TEST_F(ConstantFoldingTest, LargeConstantNoSizeIncrease) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
const int64_t large_constant_size = kMaxConstantSize + 1;
Output a = ops::Variable(scope.WithOpName("a"), {1, 1}, DT_FLOAT);
Output b_const = ops::Const(scope.WithOpName("b_const"), 3.14f, {1, large_constant_size});
Output b = ops::Identity(scope.WithOpName("b"), b_const);
Output matmul = ops::MatMul(scope.WithOpName("matmul"), a, b);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
// Run the optimizer a second time over its own output (fixed-point check).
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
for (const auto& node : output.node()) { if (node.name() == "b") { EXPECT_EQ("Const", node.op()); } }
EXPECT_EQ(4, output.node_size());
EXPECT_LT(output.ByteSizeLong(), sizeof(float) * large_constant_size + 500); }

// BroadcastGradientArgs whose input shapes are only partially known but whose
// reduction indices are provably empty is materialized as empty Const outputs
// (kept alive via a control dep on "f"); the pair fed by an unknown-shape
// operand ("i") is left untouched.
TEST_F(ConstantFoldingTest, MaterializeBroadcastGradientArgs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT, ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
Output b = ops::Square(s.WithOpName("b"), a);
Output c = ops::Mul(s.WithOpName("c"), a, b);
Output d = ops::Shape(s.WithOpName("d"), a);
Output e = ops::Shape(s.WithOpName("e"), b);
auto f = ops::internal::BroadcastGradientArgs(s.WithOpName("f"), d, e);
Output o1 = ops::Identity(s.WithOpName("o1"), f.r0);
Output o2 = ops::Identity(s.WithOpName("o2"), f.r1);
Output g = ops::Placeholder(s.WithOpName("g"), DT_FLOAT, ops::Placeholder::Shape(PartialTensorShape({1})));
Output h = ops::Shape(s.WithOpName("h"), g);
auto i = ops::internal::BroadcastGradientArgs(s.WithOpName("i"), d, h);
Output p1 = ops::Identity(s.WithOpName("p1"), i.r0);
Output p2 = ops::Identity(s.WithOpName("p2"), i.r1);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
std::vector<string> fetch_nodes = {"o1", "o2", "p1", "p2"};
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 5}));
auto g_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({1}));
auto tensors_expected = EvaluateNodes(item.graph, fetch_nodes, {{"a", a_t}, {"g", g_t}});
EXPECT_EQ(fetch_nodes.size(), tensors_expected.size());
// Second optimizer pass over its own output.
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
int found = 0;
for (const auto& node : output.node()) { if (node.name() == "o1") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("ConstantFolding/f-bcastargs-0", node.input(0)); } else if (node.name() == "o2") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("ConstantFolding/f-bcastargs-1", node.input(0)); } else if (node.name() == "ConstantFolding/f-bcastargs-0") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^f", node.input(0)); EXPECT_EQ(0, TensorShape(node.attr().at("value").tensor().tensor_shape()) .num_elements()); } else if (node.name() == "ConstantFolding/f-bcastargs-1") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^f", node.input(0)); EXPECT_EQ(0, TensorShape(node.attr().at("value").tensor().tensor_shape()) .num_elements()); } else if (node.name() == "p1") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("i", node.input(0)); } else if (node.name() == "p2") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("i:1", node.input(0)); } }
EXPECT_EQ(6, found);
auto tensors = EvaluateNodes(output, fetch_nodes, {{"a", a_t}, {"g", g_t}});
EXPECT_EQ(fetch_nodes.size(), tensors.size());
for (int i = 0; i < fetch_nodes.size(); i++) test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]); }

// Same materialization with fully-known equal shapes: the folded
// BroadcastGradientArgs must reach a fixed point after two optimizer passes
// (regression test against the optimizer looping forever on its own output).
TEST_F(ConstantFoldingTest, MaterializeBroadcastGradientArgs_InfiniteLoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT, ops::Placeholder::Shape(PartialTensorShape({2, 2})));
Output b = ops::Square(s.WithOpName("b"), a);
Output c = ops::Mul(s.WithOpName("c"), a, b);
Output d = ops::Shape(s.WithOpName("d"), a);
Output e = ops::Shape(s.WithOpName("e"), b);
auto f = ops::internal::BroadcastGradientArgs(s.WithOpName("f"), d, e);
Output o1 = ops::Identity(s.WithOpName("o1"), f.r0);
Output o2 = ops::Identity(s.WithOpName("o2"), f.r1);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
std::vector<string> fetch_nodes = {"o1", "o2"};
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto tensors_expected = EvaluateNodes(item.graph, fetch_nodes, {{"a", a_t}});
EXPECT_EQ(fetch_nodes.size(), tensors_expected.size());
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(11, output.node_size());
int found = 0;
for (const auto& node : output.node()) { if (node.name() == "ConstantFolding/f-folded-1") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("^a", node.input(0)); EXPECT_EQ("^b", node.input(1)); } else if (node.name() == "d") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^a", node.input(0)); } else if (node.name() == "e") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^b", node.input(0)); } else if (node.name() == "o1") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("ConstantFolding/f-bcastargs-0", node.input(0)); } else if (node.name() == "o2") { ++found; EXPECT_EQ(1, node.input_size()); EXPECT_EQ("ConstantFolding/f-bcastargs-1", node.input(0)); } else if (node.name() == "ConstantFolding/f-bcastargs-0") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^ConstantFolding/f-folded-1", node.input(0)); EXPECT_EQ(0, TensorShape(node.attr().at("value").tensor().tensor_shape()) .num_elements()); } else if (node.name() == "ConstantFolding/f-bcastargs-1") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^ConstantFolding/f-folded-1", node.input(0)); EXPECT_EQ(0, TensorShape(node.attr().at("value").tensor().tensor_shape()) .num_elements()); } }
EXPECT_EQ(7, found);
auto tensors = EvaluateNodes(output, fetch_nodes, {{"a", a_t}});
EXPECT_EQ(fetch_nodes.size(), tensors.size());
for (int i = 0; i < fetch_nodes.size(); i++) test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]); }

// AGGRESSIVE mode: when a full reduction can be proven (indices cover every
// input dimension, inferred either directly or via a downstream Reshape to a
// single element), the unknown "indices" input is replaced by a materialized
// constant. The TEST_F body continues into the next chunk line.
TEST_F(ConstantFoldingTest, MaterializeReductionIndices) {
for (bool use_reshape : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT, ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
Output indices = ops::Placeholder( s.WithOpName("indices"), DT_INT32, ops::Placeholder::Shape(PartialTensorShape({use_reshape ? -1 : 2})));
Output sum = ops::Sum(s.WithOpName("sum"), input, indices);
if (use_reshape) { Output size = ops::Const(s.WithOpName("size"), 1, {1}); Output reshape = ops::Reshape(s.WithOpName("reshape"), sum, size); }
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back(use_reshape ?
"reshape" : "sum");
// Continuation of MaterializeReductionIndices (header on the previous chunk
// line): run two optimizer passes and verify that the "indices" placeholder
// feeding the Sum has been replaced by the materialized constant
// "ConstantFolding/sum-reduction_indices".
auto input_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 4}));
Tensor indices_t(DT_INT32, TensorShape({2})); indices_t.flat<int>()(0) = 0; indices_t.flat<int>()(1) = 1;
auto tensors_expected = EvaluateNodes( item.graph, item.fetch, {{"input", input_t}, {"indices", indices_t}});
EXPECT_EQ(1, tensors_expected.size());
ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
int found = 0;
for (const auto& node : output.node()) { if (node.name() == "ConstantFolding/sum-reduction_indices") { ++found; EXPECT_EQ("Const", node.op()); EXPECT_EQ("^indices", node.input(0)); EXPECT_EQ(2, TensorShape(node.attr().at("value").tensor().tensor_shape()) .num_elements()); } else if (node.name() == "sum") { ++found; EXPECT_EQ("ConstantFolding/sum-reduction_indices", node.input(1)); } else if (node.name() == "indices") { ++found; } }
EXPECT_EQ(3, found);
auto tensors = EvaluateNodes(output, item.fetch, {{"input", input_t}, {"indices", indices_t}});
EXPECT_EQ(1, tensors.size());
test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-5); } }

// Negative case: when the reduction cannot be proven full (indices cover only
// one of two known dims, or input rank is unknown), the graph must be left
// completely unchanged even in AGGRESSIVE mode.
TEST_F(ConstantFoldingTest, MaterializeReductionIndices_NotFullReduction) {
for (bool input_rank_known : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = (input_rank_known ? ops::Placeholder(s.WithOpName("input"), DT_FLOAT, ops::Placeholder::Shape( PartialTensorShape({-1, -1}))) : ops::Placeholder(s.WithOpName("input"), DT_FLOAT));
Output indices = ops::Placeholder(s.WithOpName("indices"), DT_INT32, ops::Placeholder::Shape( PartialTensorShape({input_rank_known ? 1 : 2})));
Output sum = ops::Sum(s.WithOpName("sum"), input, indices);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("sum");
ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
CompareGraphs(item.graph, output); } }

// A Diag producing a 4096x4096 constant must NOT be folded (result would be
// huge); both nodes stay as-is and the serialized graph stays small.
TEST_F(ConstantFoldingTest, LargeConstant) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output mat_diag = ops::Const(scope.WithOpName("mat_diag"), 3.14f, TensorShape({1024 * 4}));
Output mat = ops::Diag(scope.WithOpName("mat"), mat_diag);
Output out = ops::Identity(scope.WithOpName("out"), mat);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("out");
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
int found = 0;
for (const NodeDef& node : output.node()) { if (node.name() == "out") { EXPECT_EQ(node.op(), "Identity"); ASSERT_EQ(node.input_size(), 1); EXPECT_EQ(node.input(0), "mat"); ++found; } else if (node.name() == "mat") { EXPECT_EQ(node.op(), "Diag"); ASSERT_EQ(node.input_size(), 1); EXPECT_EQ(node.input(0), "mat_diag"); ++found; } }
EXPECT_EQ(found, 2);
EXPECT_LT(output.ByteSizeLong(), sizeof(int) * 4 * 1024 + 500);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]); }

// A Switch whose data and predicate inputs are the same tensor: each branch
// output is knowable (false branch sees false, true branch sees true), so the
// LogicalNot consumers fold to Consts anchored on the inserted
// ConstantFoldingCtrl/switch_{0,1} Identity nodes. The body continues on the
// next chunk line.
TEST_F(ConstantFoldingTest, SwitchIdenticalInputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_BOOL, ops::Placeholder::Shape(TensorShape({})));
ops::Switch sw = ops::Switch(s.WithOpName("switch"), x, x);
Output id_false = ops::LogicalNot(s.WithOpName("id_false"), sw.output_false);
Output id_true = ops::LogicalNot(s.WithOpName("id_true"),
// Continuation of PartialFolding_AssociativeAndCommutative (header on an
// earlier chunk line): remaining per-node checks for the partially folded
// AddN/AccumulateNV2 nodes, where runs of constant inputs are collapsed into
// "ConstantFolding/<name>_partial_split_<n>" Const nodes.
EXPECT_EQ("x", node.input(1)); EXPECT_EQ("y", node.input(2)); } if (node.name() == "acc3") { EXPECT_EQ(op_name, node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("ConstantFolding/acc3_partial_split_2", node.input(0)); EXPECT_EQ("z", node.input(1)); } if (node.name() == "acc4") { EXPECT_EQ(op_name, node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("ConstantFolding/acc4_partial_split_2", node.input(0)); EXPECT_EQ("y", node.input(1)); } if (node.name() == "acc5") { EXPECT_EQ(op_name, node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("ConstantFolding/acc5_partial_split_2", node.input(1)); } if (node.name() == "acc6") { EXPECT_EQ(op_name, node.op()); EXPECT_EQ(3, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("ConstantFolding/acc6_partial_split_2", node.input(1)); EXPECT_EQ("y", node.input(2)); } if (absl::StartsWith(node.name(), "ConstantFolding/")) { EXPECT_EQ("Const", node.op()); } }
std::vector<string> fetch = {"acc0"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
auto tensors = EvaluateNodes(output, fetch);
EXPECT_EQ(1, tensors_expected.size()); EXPECT_EQ(1, tensors.size());
test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } }

// Partial folding of Concat: adjacent constant inputs are merged into a
// single "ConstantFolding/concatN_partial_split_<i>" Const where that shrinks
// the op; concats that are all-constant fold entirely ("concat0"); all other
// nodes must be byte-identical to the input graph.
TEST_F(ConstantFoldingTest, PartialFolding_Concat) {
Scope s = Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2})));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2})));
Output z = ops::Placeholder(s.WithOpName("z"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2})));
Output axis = ops::Const(s.WithOpName("axis"), 0, {});
Output c1 = ops::Const(s.WithOpName("c1"), 1.0f, {2, 2});
Output c2 = ops::Const(s.WithOpName("c2"), 2.0f, {2, 2});
Output concat0 = ops::Concat(s.WithOpName("concat0"), {c1, c2, c1}, axis);
Output concat1 = ops::Concat(s.WithOpName("concat1"), {x, y, z}, axis);
Output concat2 = ops::Concat(s.WithOpName("concat2"), {c1, x, y}, axis);
Output concat3 = ops::Concat(s.WithOpName("concat3"), {c1, c2, z}, axis);
Output concat4 = ops::Concat(s.WithOpName("concat4"), {c1, y, c2}, axis);
Output concat5 = ops::Concat(s.WithOpName("concat5"), {x, c1, c2}, axis);
Output concat6 = ops::Concat(s.WithOpName("concat6"), {x, c1, y, c2}, axis);
Output concat7 = ops::Concat(s.WithOpName("concat7"), {x, y, c1, c2}, axis);
Output concat8 = ops::Concat(s.WithOpName("concat8"), {x, c1, c2, y}, axis);
Output concat9 = ops::Concat(s.WithOpName("concat9"), {c1, c2, x, y}, axis);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"concat0", "concat1", "concat2", "concat3", "concat4", "concat5", "concat6", "concat7", "concat8", "concat9"};
auto tensors_expected = EvaluateNodes(item.graph, {"concat0"});
EXPECT_EQ(1, tensors_expected.size());
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
// Second pass over the optimizer's own output (fixed point).
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(21, output.node_size());
for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "concat0") { EXPECT_EQ("Const", node.op()); } else if (node.name() == "concat3") { EXPECT_EQ(3, node.input_size()); EXPECT_EQ("ConstantFolding/concat3_partial_split_0", node.input(0)); EXPECT_EQ("z", node.input(1)); EXPECT_EQ("axis", node.input(2)); } else if (node.name() == "concat5") { EXPECT_EQ(3, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("ConstantFolding/concat5_partial_split_1", node.input(1)); EXPECT_EQ("axis", node.input(2)); } else if (node.name() == "concat7") { EXPECT_EQ(4, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("y", node.input(1)); EXPECT_EQ("ConstantFolding/concat7_partial_split_2", node.input(2)); EXPECT_EQ("axis", node.input(3)); } else if (node.name() == "concat8") { EXPECT_EQ(4, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("ConstantFolding/concat8_partial_split_1", node.input(1)); EXPECT_EQ("y", node.input(2)); EXPECT_EQ("axis", node.input(3)); } else if (node.name() == "concat9") { EXPECT_EQ(4, node.input_size()); EXPECT_EQ("ConstantFolding/concat9_partial_split_0", node.input(0)); EXPECT_EQ("x", node.input(1)); EXPECT_EQ("y", node.input(2)); EXPECT_EQ("axis", node.input(3)); } else if (absl::StartsWith(node.name(), "ConstantFolding/")) { EXPECT_EQ("Const", node.op()); } else { EXPECT_EQ(item.graph.node(i).DebugString(), node.DebugString()); } }
auto tensors = EvaluateNodes(output, {"concat0"});
EXPECT_EQ(1, tensors.size());
test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); }

// IdentityN with mixed const/non-const inputs: consumers of its constant
// outputs fold to Consts with a control dep on id_n, while non-const outputs
// (id_n:1) pass through unchanged.
TEST_F(ConstantFoldingTest, PartialFolding_IdentityN) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(scope.WithOpName("x"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({})));
Output c1 = ops::Const(scope.WithOpName("c1"), 1.0f, {2, 2});
Output c2 = ops::Const(scope.WithOpName("c2"), 2.0f, {2, 2});
auto id_n = ops::IdentityN(scope.WithOpName("id_n"), {c1, x, c2});
auto id0 = ops::Identity(scope.WithOpName("id0"), id_n[0]);
auto id1 = ops::Identity(scope.WithOpName("id1"), id_n[1]);
auto add0 = ops::Add(scope.WithOpName("add0"), id_n[0], id_n[1]);
auto add1 = ops::Add(scope.WithOpName("add1"), id_n[0], id_n[2]);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("id0"); item.fetch.push_back("id1"); item.fetch.push_back("add0"); item.fetch.push_back("add1");
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(8, output.node_size());
for (const auto& node : output.node()) { if (node.name() == "id_n") { EXPECT_EQ(3, node.input_size()); EXPECT_EQ("c1", node.input(0)); EXPECT_EQ("x", node.input(1)); EXPECT_EQ("c2", node.input(2)); } if (node.name() == "id0") { EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^id_n", node.input(0)); } if ("id1" == node.name()) { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("id_n:1", node.input(0)); } if ("add0" == node.name()) { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("c1", node.input(0)); EXPECT_EQ("id_n:1", node.input(1)); } if ("add1" == node.name()) { EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^id_n", node.input(0)); } }
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({}));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"x", x_t}});
EXPECT_EQ(4, tensors_expected.size());
auto tensors = EvaluateNodes(output, item.fetch, {{"x", x_t}});
EXPECT_EQ(4, tensors.size());
for (int i = 0; i < tensors.size(); i++) { test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-5); } }

// Stack (Pack) of a single tensor is rewritten to ExpandDims with a
// materialized axis constant; control dependencies (^y) are preserved.
// The body continues on the next chunk line.
TEST_F(ConstantFoldingTest, TrivialPack) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output x = ops::RandomNormal(scope.WithOpName("x"), {2, 2}, DataType::DT_FLOAT);
Output y = ops::Const(scope.WithOpName("y"), {2.0f}, {});
auto stack = ops::Stack(scope.WithOpName("stack").WithControlDependencies({y}), {x}, ops::Stack::Axis(1));
auto stack_no_axis = ops::Stack(scope.WithOpName("stack_no_axis"), {x});
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"stack", "stack_no_axis"};
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(7, output.node_size());
int found = 0;
for (const auto& node : output.node()) { if (node.name() == "stack") { EXPECT_EQ("ExpandDims", node.op()); EXPECT_EQ(3, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("ConstantFolding/stack_const_axis", node.input(1)); EXPECT_EQ("^y", node.input(2)); ++found; } else if (node.name() == "stack_no_axis") { EXPECT_EQ("ExpandDims", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0));
// Continuation of TrivialPack (header on the previous chunk line): finish
// node checks and compare only output *shapes* (inputs are RandomNormal, so
// values are not comparable across runs).
EXPECT_EQ("ConstantFolding/stack_no_axis_const_axis", node.input(1)); ++found; } else if (node.name() == "ConstantFolding/stack_const_axis") { EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^x", node.input(0)); ++found; } }
EXPECT_EQ(found, 3);
std::vector<string> fetch = {"stack", "stack_no_axis"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
auto tensors = EvaluateNodes(output, fetch);
EXPECT_EQ(2, tensors_expected.size()); EXPECT_EQ(2, tensors.size());
EXPECT_EQ(tensors_expected[0].shape(), tensors[0].shape());
EXPECT_EQ(tensors_expected[1].shape(), tensors[1].shape()); }

// Identity reading a constant through an Enter with is_constant=true folds to
// a Const (control-dep on the Enter); Enter of a non-const value (enter1) or
// a non-constant frame entry (enter3, is_constant=false) is left alone.
// Graph is built directly with AddNode rather than the C++ op wrappers.
TEST_F(ConstantFoldingTest, Enter) {
GrapplerItem item;
AttrValue frame_name; frame_name.set_s("foo");
AttrValue is_constant_true; is_constant_true.set_b(true);
AttrValue is_constant_false; is_constant_false.set_b(false);
AttrValue type; type.set_type(DT_FLOAT);
AttrValue value;
Tensor value_tensor(DT_FLOAT, TensorShape({})); value_tensor.flat<float>()(0) = 1; value_tensor.AsProtoTensorContent(value.mutable_tensor());
GraphDef& graph = item.graph;
AddNode("x", "Placeholder", {}, {{"dtype", type}}, &graph);
AddNode("c1", "Const", {"^x"}, {{"value", value}, {"dtype", type}}, &graph);
AddNode("enter1", "Enter", {"x"}, {{"T", type}, {"frame_name", frame_name}, {"is_constant", is_constant_true}}, &graph);
AddNode("enter2", "Enter", {"c1"}, {{"T", type}, {"frame_name", frame_name}, {"is_constant", is_constant_true}}, &graph);
AddNode("enter3", "Enter", {"c1"}, {{"T", type}, {"frame_name", frame_name}, {"is_constant", is_constant_false}}, &graph);
AddNode("id1", "Identity", {"enter1"}, {{"T", type}}, &graph);
AddNode("id2", "Identity", {"enter2"}, {{"T", type}}, &graph);
AddNode("id3", "Identity", {"enter2"}, {{"T", type}}, &graph);
AddNode("id4", "Identity", {"enter3"}, {{"T", type}}, &graph);
item.fetch.push_back("id1"); item.fetch.push_back("id2"); item.fetch.push_back("id3"); item.fetch.push_back("id4");
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(9, output.node_size());
for (const NodeDef& node : output.node()) { if (node.name() == "id1") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("enter1", node.input(0)); } if (node.name() == "id2" || node.name() == "id3") { EXPECT_EQ("Const", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("^enter2", node.input(0)); } if (node.name() == "id4") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("enter3", node.input(0)); } } }

// TensorArraySize folds to a Const only for a fixed-size TensorArray with a
// constant size; dynamic arrays and arrays reached via a resource placeholder
// keep the TensorArraySizeV3 op.
TEST_F(ConstantFoldingTest, TensorArraySize) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output size = ops::Const(scope.WithOpName("size"), 5, TensorShape({}));
Output placeholder = ops::Placeholder(scope.WithOpName("placeholder"), DT_RESOURCE, ops::Placeholder::Shape(TensorShape({2})));
Output foo = ops::Const(scope.WithOpName("foo"), 5.0f, TensorShape({}));
auto dynamic_array = ops::TensorArray(scope.WithOpName("dynamic"), size, DT_FLOAT, ops::TensorArray::DynamicSize(true));
auto static_array = ops::TensorArray(scope.WithOpName("static"), size, DT_FLOAT, ops::TensorArray::DynamicSize(false));
auto dynamic_sz = ops::TensorArraySize( scope.WithOpName("dynamic_sz"), dynamic_array.handle, dynamic_array.flow);
auto static_sz = ops::TensorArraySize(scope.WithOpName("static_sz"), static_array.handle, static_array.flow);
auto placeholder_sz = ops::TensorArraySize(scope.WithOpName("placeholder_sz"), placeholder, foo);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, {"dynamic_sz", "static_sz"});
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(8, output.node_size());
EXPECT_EQ("dynamic_sz", output.node(5).name()); EXPECT_EQ("TensorArraySizeV3", output.node(5).op());
EXPECT_EQ("static_sz", output.node(6).name()); EXPECT_EQ("Const", output.node(6).op());
EXPECT_EQ("placeholder_sz", output.node(7).name()); EXPECT_EQ("TensorArraySizeV3", output.node(7).op());
auto tensors_actual = EvaluateNodes(output, {"dynamic_sz", "static_sz"});
EXPECT_EQ(2, tensors_expected.size()); EXPECT_EQ(2, tensors_actual.size());
test::ExpectTensorEqual<int32>(tensors_expected[0], tensors_actual[0]);
test::ExpectTensorEqual<int32>(tensors_expected[1], tensors_actual[1]); }

// Folding FLT_MIN * 0.1 at optimization time must produce exactly the value
// the runtime kernel produces (i.e. respect denormal-flushing behavior), so
// the folded Const compares bit-equal to the evaluated original.
TEST_F(ConstantFoldingTest, FoldingPreservesDenormalFlushing) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), std::numeric_limits<float>::min(), {1});
Output b = ops::Const(s.WithOpName("b"), 0.1f, {1});
Output c = ops::Mul(s.WithOpName("c"), a, b);
GrapplerItem item;
item.fetch.push_back("c");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
EXPECT_EQ(1, output.node_size());
const NodeDef& node_d = output.node(0);
EXPECT_EQ("c", node_d.name()); EXPECT_EQ("Const", node_d.op());
std::vector<string> fetch = {"c"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
auto tensors = EvaluateNodes(output, fetch);
EXPECT_EQ(1, tensors_expected.size()); EXPECT_EQ(1, tensors.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]); }

// Two chained Concats each pairing a ~5MB constant with non-const data: the
// optimizer must not merge/fold them into one oversized constant; only output
// shapes are compared (input is RandomUniform).
TEST_F(ConstantFoldingTest, EvaluatingLargeConstantNoFoldingMergingLoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
int size = 10 * 1024 * 1024 / 4 / 2;
Output nonconst = ops::RandomUniform(s.WithOpName("nonconst"), {size, 1}, DT_FLOAT);
Output const1 = ops::Const(s.WithOpName("const1"), 0.0f, {size, 1});
Output const2 = ops::Const(s.WithOpName("const2"), 1.0f, {size, 1});
Output axis = ops::Const(s.WithOpName("axis"), -1, {});
Output concat1 = ops::Concat(s.WithOpName("concat1"), {nonconst, const1}, axis);
Output result = ops::Concat(s.WithOpName("result"), {concat1, const2}, axis);
GrapplerItem item;
item.fetch.push_back("result");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
ConstantFolding optimizer(nullptr); GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status);
std::vector<string> fetch = {"result"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
auto tensors = EvaluateNodes(output, fetch);
EXPECT_EQ(1, tensors_expected.size()); EXPECT_EQ(1, tensors.size());
EXPECT_EQ(tensors_expected[0].shape(), tensors[0].shape()); }

// Parameterized fixture: builds a small Const -> Cast graph with Identity
// children and checks folding for every combination of fetched nodes. The
// class definition runs past the end of this chunk (SetFetch is truncated).
class ConstantFoldingCastConstTest : public GrapplerTest {
protected:
// Drives one combination: builds the graph, sets fetches, optimizes, checks
// the node count, and compares evaluation against the unoptimized graph.
void ConstantFoldingCastConst(bool fetch_const, bool fetch_cast, bool fetch_const_child, bool fetch_cast_child) {
if (!fetch_const && !fetch_cast && !fetch_const_child && !fetch_cast_child) { return; }
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
CreateCastConstGraph(s);
GrapplerItem item;
int expected_output_size = SetFetch(&item, fetch_const, fetch_cast, fetch_const_child, fetch_cast_child);
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output = ConstantFoldingOptimize(item);
EXPECT_EQ(expected_output_size, output.node_size());
EvaluateAndCompareUnoptimized(item.graph, output, item.fetch); }
private:
// Builds: const1 (int const) -> cast (to float), each with an Identity child.
void CreateCastConstGraph(const tensorflow::Scope& s) {
Output const1 = ops::Const(s.WithOpName("const1"), 2, {5, 5});
Output cast = ops::Cast(s.WithOpName("cast"), const1, DT_FLOAT);
Output const1_child = ops::Identity(s.WithOpName("const1_child"), const1);
Output cast_child = ops::Identity(s.WithOpName("cast_child"), cast); }
// Adds the requested fetches and returns the expected optimized node count.
// (Definition continues past the end of this chunk.)
int SetFetch(GrapplerItem* item, bool fetch_const, bool fetch_cast, bool fetch_const_child, bool fetch_cast_child) {
int expected_output_size = 0;
if (fetch_const) { item->fetch.push_back("const1"); expected_output_size++; }
if (fetch_cast) { item->fetch.push_back("cast");
expected_output_size++; } if (fetch_const_child) { item->fetch.push_back("const1_child"); expected_output_size++; } if (fetch_cast_child) { item->fetch.push_back("cast_child"); expected_output_size++; } return expected_output_size; } GraphDef ConstantFoldingOptimize(const GrapplerItem& item) { ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); return output; } void EvaluateAndCompareUnoptimized(const GraphDef& unoptimized_graph, const GraphDef& optimized_graph, const std::vector<string>& fetch_nodes) { auto tensors_expected = EvaluateNodes(unoptimized_graph, fetch_nodes); auto tensors = EvaluateNodes(optimized_graph, fetch_nodes); ASSERT_EQ(fetch_nodes.size(), tensors_expected.size()); ASSERT_EQ(fetch_nodes.size(), tensors.size()); for (int i = 0; i < fetch_nodes.size(); i++) { if (fetch_nodes[i] == "const1" || fetch_nodes[i] == "const1_child") { test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]); } else { test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]); } } } }; TEST_F(ConstantFoldingCastConstTest, CastConstFolding) { for (bool fetch_const : {false, true}) { for (bool fetch_cast : {false, true}) { for (bool fetch_const_child : {false, true}) { for (bool fetch_cast_child : {false, true}) { ConstantFoldingCastConst(fetch_const, fetch_cast, fetch_const_child, fetch_cast_child); } } } } } TEST_F(ConstantFoldingTest, MaterializeConstantValuedNode) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output x = ops::Placeholder(scope.WithOpName("x"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({1, 2, 3, 4}))); Output ones_like = ops::OnesLike(scope.WithOpName("ones_like"), x); Output zeros_like = ops::ZerosLike(scope.WithOpName("zeros_like"), x); Output fill = ops::Fill(scope.WithOpName("fill"), {4, 3, 2, 1}, 42); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"ones_like", "zeros_like", "fill"}; auto x_t = 
GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 3, 4})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"x", x_t}}); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); EXPECT_EQ(output.node_size(), 6); for (const auto& node : output.node()) { if (node.name() != "x") { EXPECT_EQ(node.op(), "Const"); } if (node.name() == "ones_like" || node.name() == "zeros_like") { ASSERT_EQ(node.input_size(), 1); EXPECT_EQ(node.input(0), "^x"); } if (node.name() == "fill") { ASSERT_EQ(node.input_size(), 2); EXPECT_EQ(node.input(0)[0], '^'); EXPECT_EQ(node.input(1)[0], '^'); } } auto tensors = EvaluateNodes(output, item.fetch, {{"x", x_t}}); ASSERT_EQ(item.fetch.size(), tensors.size()); ASSERT_EQ(tensors_expected.size(), tensors.size()); for (int i = 0; i < tensors.size(); i++) { if (item.fetch[i] == "fill") { test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]); } else { test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]); } } } TEST_F(ConstantFoldingTest, MaterializeConstantValuedNodeDisableCompression) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output x = ops::Placeholder(scope.WithOpName("x"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({1, 2, 3, 4}))); Output ones_like = ops::OnesLike(scope.WithOpName("ones_like"), x); Output zeros_like = ops::ZerosLike(scope.WithOpName("zeros_like"), x); Output fill = ops::Fill(scope.WithOpName("fill"), {4, 3, 2, 1}, 42); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"ones_like", "zeros_like", "fill"}; auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 3, 4})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"x", x_t}}); ConstantFolding optimizer(nullptr, true); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); EXPECT_EQ(output.node_size(), 6); for (const auto& node : output.node()) { if 
(node.name() == "ones_like") { EXPECT_EQ(node.op(), "OnesLike"); ASSERT_EQ(node.input_size(), 1); EXPECT_EQ(node.input(0), "x"); } if (node.name() == "zeros_like") { EXPECT_EQ(node.op(), "ZerosLike"); ASSERT_EQ(node.input_size(), 1); EXPECT_EQ(node.input(0), "x"); } if (node.name() == "fill") { EXPECT_EQ(node.op(), "Fill"); ASSERT_EQ(node.input_size(), 2); EXPECT_EQ(node.input(0), "Const/Const"); EXPECT_EQ(node.input(1), "Const_1/Const"); } } auto tensors = EvaluateNodes(output, item.fetch, {{"x", x_t}}); ASSERT_EQ(item.fetch.size(), tensors.size()); ASSERT_EQ(tensors_expected.size(), tensors.size()); for (int i = 0; i < tensors.size(); i++) { if (item.fetch[i] == "fill") { test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]); } else { test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]); } } } TEST_F(ConstantFoldingTest, MaterializeConstantValuedNodeHugeFill) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output value = ops::Const(scope.WithOpName("value"), 42, {}); Output shape_const = ops::Const(scope.WithOpName("shape"), {1024, 1024, 1024, 1024, 1024}, {5}); Output fill_huge = ops::Fill(scope.WithOpName("fill_huge"), shape_const, value); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); NodeDef* node = item.graph.mutable_node(0); ASSERT_EQ(node->name(), "value"); TensorProto* t = (*node->mutable_attr())["value"].mutable_tensor(); t->clear_int_val(); int val = 42; port::CopyFromArray(t->mutable_tensor_content(), reinterpret_cast<const char*>(&val), sizeof(int)); item.fetch = {"fill_huge"}; ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); EXPECT_EQ(output.node_size(), 3); for (const auto& node : output.node()) { EXPECT_EQ(node.op(), "Const"); if (node.name() == "fill_huge") { ASSERT_EQ(node.input_size(), 2); EXPECT_EQ(node.input(0), "^shape"); EXPECT_EQ(node.input(1), "^value"); } } } TEST_F(ConstantFoldingTest, 
BitcastDenormalFloats) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Tensor x_t(DT_INT64, TensorShape({2, 2})); x_t.flat<int64_t>()(0) = 9223372036854775807L; x_t.flat<int64_t>()(1) = 1L; x_t.flat<int64_t>()(2) = 9223372036854775807L; x_t.flat<int64_t>()(3) = 1L; Output x = ops::Const(scope.WithOpName("x"), x_t); Output y = ops::Bitcast(scope.WithOpName("y"), x, DT_FLOAT); Output z = ops::Bitcast(scope.WithOpName("z"), y, DT_INT64); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"z"}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {}); ConstantFolding optimizer(nullptr); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); ASSERT_EQ(output.node_size(), 1); const NodeDef& node = output.node(0); EXPECT_EQ(node.name(), "z"); EXPECT_EQ(node.op(), "Const"); auto tensors = EvaluateNodes(output, item.fetch, {}); ASSERT_EQ(tensors.size(), 1); ASSERT_EQ(tensors_expected.size(), 1); test::ExpectTensorEqual<int64_t>(tensors[0], tensors_expected[0]); } TEST_F(ConstantFoldingTest, SimplifyCase) { using test::function::NDef; for (int index = 0; index < 2; ++index) { GrapplerItem item; constexpr char kDevice[] = "/job:localhost/replica:0/task:0/device:CPU:0"; AttrValue branches; auto* f = branches.mutable_list()->add_func(); f->set_name("XTimesTwo"); (*f->mutable_attr())["T"].set_type(DT_FLOAT); auto* g = branches.mutable_list()->add_func(); *g = *f; g->set_name("NonZero"); AttrValue output_shapes; output_shapes.mutable_list()->add_shape(); TensorShapeProto* g_shape = output_shapes.mutable_list()->add_shape(); g_shape->set_unknown_rank(true); const Tensor kZero = test::AsScalar<int32>(0); const Tensor kOne = test::AsScalar<int32>(1); item.graph = test::function::GDef( {NDef("one", "Const", {}, {{"value", index == 0 ? 
kZero : kOne}, {"dtype", DT_INT32}}, kDevice), NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice), NDef("case", "Case", {"one", "x"}, {{"Tin", DataTypeSlice{DT_FLOAT}}, {"Tout", DataTypeSlice{DT_FLOAT}}, {"branches", branches}, {"output_shapes", output_shapes}}, kDevice), NDef("y", "Identity", {"case"}, {{"T", DT_FLOAT}}, kDevice)}, { test::function::XTimesTwo(), test::function::NonZero(), }); VLOG(1) << "Before: " << item.graph.DebugString(); item.fetch = {"y"}; const Tensor kTwo = test::AsScalar<float>(2.0f); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"x", kTwo}}); ConstantFolding optimizer(nullptr); GraphDef optimized_graph; TF_ASSERT_OK( optimizer.Optimize(nullptr, item, &optimized_graph)); VLOG(1) << "After: " << optimized_graph.DebugString(); int pco_count = 0; for (const auto& node : optimized_graph.node()) { EXPECT_NE(node.op(), "Case"); if (node.op() == "PartitionedCall") { ++pco_count; const auto& shape_list = node.attr().at("_output_shapes").list(); ASSERT_EQ(shape_list.shape_size(), 1); EXPECT_EQ(shape_list.shape(0).dim_size(), 0); if (index == 0) { EXPECT_EQ(node.attr().at("f").func().name(), "XTimesTwo"); EXPECT_EQ(shape_list.shape(0).unknown_rank(), false); } else { EXPECT_EQ(node.attr().at("f").func().name(), "NonZero"); EXPECT_EQ(shape_list.shape(0).unknown_rank(), true); } } } EXPECT_EQ(pco_count, 1); auto tensors = EvaluateNodes(optimized_graph, item.fetch, {{"x", kTwo}}); ASSERT_EQ(tensors.size(), tensors_expected.size()); test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]); } } TEST_F(ConstantFoldingTest, SimplifySelect) { for (bool scalar_pred : {true, false}) { for (bool pred_val : {true, false}) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); std::unique_ptr<Tensor> if_t; if (scalar_pred) { if_t.reset(new Tensor(DT_BOOL, TensorShape())); } else { if_t.reset(new Tensor(DT_BOOL, TensorShape({2, 2}))); } for (int i = 0; i < (scalar_pred ? 
1 : 4); ++i) { if_t->flat<bool>()(i) = pred_val; } Output if_ = ops::Const(scope.WithOpName("if"), *if_t); Output then_ = ops::Placeholder(scope.WithOpName("then"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2}))); Output else_ = ops::Placeholder(scope.WithOpName("else"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 2}))); Output select = ops::SelectV2(scope.WithOpName("select"), if_, then_, else_); Output id = ops::Identity(scope.WithOpName("id"), select); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"id"}; const Tensor kOne = test::AsTensor<float>({1.0f, 1.0f, 1.0f, 1.0f}, TensorShape({2, 2})); const Tensor kTwo = test::AsTensor<float>({2.0f, 2.0f, 2.0f, 2.0f}, TensorShape({2, 2})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"then", kOne}, {"else", kTwo}}); ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr); GraphDef optimized_graph; TF_EXPECT_OK( optimizer.Optimize(nullptr, item, &optimized_graph)); ASSERT_EQ(optimized_graph.node_size(), 5); bool found = false; for (const auto& node : optimized_graph.node()) { if (node.name() == "select") { found = true; EXPECT_EQ(node.op(), "Identity"); ASSERT_EQ(node.input_size(), 3); EXPECT_EQ(node.input(0), pred_val ? "then" : "else"); EXPECT_EQ(node.input(1), pred_val ? "^if" : "^then"); EXPECT_EQ(node.input(2), pred_val ? 
"^else" : "^if"); } } EXPECT_TRUE(found); auto tensors = EvaluateNodes(optimized_graph, item.fetch, {{"then", kOne}, {"else", kTwo}}); ASSERT_EQ(tensors.size(), 1); ASSERT_EQ(tensors_expected.size(), 1); test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]); } } } TEST_F(ConstantFoldingTest, SimplifySelect_BroadcastTo) { for (TensorShape pred_shape : {TensorShape{2, 1}, TensorShape{2, 2, 1}}) { for (bool pred_val : {true, false}) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); std::unique_ptr<Tensor> if_t; if_t.reset(new Tensor(DT_BOOL, pred_shape)); for (int i = 0; i < pred_shape.num_elements(); ++i) { if_t->flat<bool>()(i) = pred_val; } Output if_ = ops::Const(scope.WithOpName("if"), *if_t); Output then_ = ops::Placeholder(scope.WithOpName("then"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 1}))); Output else_ = ops::Placeholder(scope.WithOpName("else"), DT_FLOAT, ops::Placeholder::Shape(TensorShape({2, 4}))); Output select = ops::SelectV2(scope.WithOpName("select"), if_, then_, else_); Output id = ops::Identity(scope.WithOpName("id"), select); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"id"}; const Tensor kOne = test::AsTensor<float>({1.0f, 1.0f}, TensorShape({2, 1})); const Tensor kTwo = test::AsTensor<float>( {2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f}, TensorShape({2, 4})); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"then", kOne}, {"else", kTwo}}); ConstantFolding optimizer(RewriterConfig::AGGRESSIVE, nullptr); GraphDef optimized_graph; TF_EXPECT_OK( optimizer.Optimize(nullptr, item, &optimized_graph)); ASSERT_EQ(optimized_graph.node_size(), 6); bool found = false; for (const auto& node : optimized_graph.node()) { if (node.name() == "select") { found = true; EXPECT_EQ(node.op(), "BroadcastTo"); ASSERT_EQ(node.input_size(), 4); EXPECT_EQ(node.input(0), pred_val ? 
"then" : "else"); EXPECT_EQ(node.input(1), strings::StrCat("ConstantFolding/select-broadcastto_shape-", pred_val ? 1 : 2)); EXPECT_EQ(node.input(2), pred_val ? "^else" : "^if"); EXPECT_EQ(node.input(3), pred_val ? "^if" : "^then"); } } EXPECT_TRUE(found); auto tensors = EvaluateNodes(optimized_graph, item.fetch, {{"then", kOne}, {"else", kTwo}}); ASSERT_EQ(tensors.size(), 1); ASSERT_EQ(tensors_expected.size(), 1); ASSERT_EQ(tensors[0].shape(), pred_shape.num_elements() == 2 ? TensorShape({2, 4}) : TensorShape({2, 2, 4})); test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]); } } } TEST_F(ConstantFoldingTest, QuantizationEmulation) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output x = ops::Const(scope.WithOpName("x"), {0.0f, 1.0f, 2.0f, 3.0f}, {4}); Output min_range = ops::Const(scope.WithOpName("min_range"), 0.0f, {}); Output max_range = ops::Const(scope.WithOpName("max_range"), 3.0f, {}); Output y = ops::QuantizeAndDequantizeV2(scope.WithOpName("y"), x, min_range, max_range); Output id = ops::Identity(scope.WithOpName("id"), y); GrapplerItem item; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); item.fetch = {"id"}; std::vector<Tensor> expected_tensors = EvaluateNodes(item.graph, item.fetch); for (const bool fold_quantization_emulation : {false, true}) { ConstantFolding optimizer(nullptr, false, fold_quantization_emulation); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); int num_quantization_emulation_ops = 0; for (const NodeDef& node : output.node()) { if (node.op() == "QuantizeAndDequantizeV2") { num_quantization_emulation_ops++; } } EXPECT_EQ(fold_quantization_emulation ? 
0 : 1, num_quantization_emulation_ops); std::vector<Tensor> actual_tensors = EvaluateNodes(output, item.fetch); for (int i = 0; i < item.fetch.size(); ++i) { test::ExpectTensorEqual<float>(expected_tensors[i], actual_tensors[i]); } } } static void add_full_type(GrapplerItem& item, std::string node_name) { for (int i = 0; i < item.graph.node_size(); ++i) { NodeDef* node = item.graph.mutable_node(i); if (node->name() == node_name) { FullTypeDef t; t.set_type_id(TFT_PRODUCT); t.add_args()->set_type_id(TFT_TENSOR); t.mutable_args(0)->add_args()->set_type_id(TFT_FLOAT); *node->mutable_experimental_type() = t; break; } } } TEST_F(ConstantFoldingTest, RedundantVariableUpdateAddZeroToVar) { tensorflow::Scope scope = tensorflow::Scope::NewRootScope(); Output var = ops::Variable(scope.WithOpName("var"), {2, 2}, DT_FLOAT); Output zeros = ops::Const(scope.WithOpName("zeros_const"), 0.0f, {2, 2}); Output b = ops::AssignAdd(scope.WithOpName("assign_add"), var, zeros); GrapplerItem item; item.fetch = {"assign_add"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); add_full_type(item, "assign_add"); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("var", "VariableV2", {}, {}, &want); AddNode("zeros_const", "Const", {}, {}, &want); AddNode("assign_add", "Identity", {"var", "^zeros_const"}, {}, &want); CompareGraphs(want, got); TF_EXPECT_OK(status); for (int i = 0; i < got.node_size(); ++i) { const NodeDef& node = got.node(i); if (node.name() == "assign_add") { FullTypeDef t1 = node.experimental_type(); EXPECT_EQ(t1.type_id(), TFT_PRODUCT); EXPECT_EQ(t1.args_size(), 1); FullTypeDef t2 = t1.args(0); EXPECT_EQ(t2.type_id(), TFT_TENSOR); EXPECT_EQ(t2.args_size(), 1); FullTypeDef t3 = t1.args(0).args(0); EXPECT_EQ(t3.type_id(), TFT_FLOAT); EXPECT_EQ(t3.args_size(), 0); } } } TEST_F(ConstantFoldingTest, RedundantVariableUpdateAddZerosToHandle) { tensorflow::Scope scope = 
tensorflow::Scope::NewRootScope(); Output handle = ops::VarHandleOp(scope.WithOpName("handle"), DT_FLOAT, {2, 2}); Output zeros = ops::Const(scope.WithOpName("zeros_const"), 0.0f, {2, 2}); auto b = ops::AssignAddVariableOp(scope.WithOpName("assign_add_var"), handle, zeros); GrapplerItem item; item.fetch = {"assign_add_var"}; TF_CHECK_OK(scope.ToGraphDef(&item.graph)); add_full_type(item, "assign_add_var"); ConstantFolding optimizer(nullptr); GraphDef got; Status status = optimizer.Optimize(nullptr, item, &got); TF_EXPECT_OK(status); GraphDef want; AddNode("handle", "VarHandleOp", {}, {}, &want); AddNode("zeros_const", "Const", {}, {}, &want); AddNode("assign_add_var", "NoOp", {"^handle", "^zeros_const"}, {}, &want); CompareGraphs(want, got); TF_EXPECT_OK(status); for (int i = 0; i < got.node_size(); ++i) { const NodeDef& node = got.node(i); if (node.name() == "assign_add_var") { FullTypeDef t = node.experimental_type(); EXPECT_TRUE((t.type_id() == TFT_UNSET) || ((t.type_id() == TFT_PRODUCT) && (t.args_size() == 0))); } } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/constant_folding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/constant_folding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8929e73e-7c5f-4fe5-94e1-a64bdd7e2baf
cpp
tensorflow/tensorflow
dtensor_location
tensorflow/dtensor/mlir/dtensor_location.cc
tensorflow/dtensor/mlir/dtensor_location_test.cc
#include "tensorflow/dtensor/mlir/dtensor_location.h" #include <algorithm> #include <queue> #include <string> #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/utils/name_utils.h" namespace tensorflow { namespace dtensor { namespace { std::string CreateLocalLocationString(mlir::FileLineColLoc loc) { return llvm::formatv(">> {0}:{1}:{2}", loc.getFilename(), loc.getLine(), loc.getColumn()) .str(); } } mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file, unsigned int line, llvm::StringRef name) { auto split = file.rsplit("/"); if (!split.second.empty()) file = split.second; mlir::Location callee_loc = mlir::FileLineColLoc::get(loc.getContext(), file, line, 0); std::string new_name = GetNameFromLoc(loc); if (!new_name.empty()) { if (!name.empty()) { new_name = llvm::formatv("{0}/{1}", new_name, name).str(); } callee_loc = mlir::NameLoc::get( mlir::StringAttr::get(loc.getContext(), new_name), callee_loc); } return mlir::CallSiteLoc::get(callee_loc, loc); } mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file, unsigned int line, llvm::StringRef name) { return DTensorLocation(op->getLoc(), file, line, name); } std::string DTensorLocationToString(mlir::Location loc) { llvm::SmallVector<std::string, 4> stack; std::queue<mlir::Location> queue; queue.push(loc); while (!queue.empty()) { mlir::Location& front = queue.front(); if (auto name_loc = mlir::dyn_cast<mlir::NameLoc>(front)) { queue.push(name_loc.getChildLoc()); } else if (auto callsite_loc = mlir::dyn_cast<mlir::CallSiteLoc>(front)) { queue.push(callsite_loc.getCallee()); queue.push(callsite_loc.getCaller()); } else if (auto line_loc = mlir::dyn_cast<mlir::FileLineColLoc>(front)) { stack.push_back(CreateLocalLocationString(line_loc)); } queue.pop(); } std::reverse(stack.begin(), stack.end()); std::string s; 
llvm::raw_string_ostream ss(s); llvm::interleave(stack, ss, "\n"); return ss.str(); } } }
#include "tensorflow/dtensor/mlir/dtensor_location.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/utils/name_utils.h" #include "tensorflow/core/platform/test.h" namespace { void CheckFileLineColLocation(mlir::Location loc, unsigned line, unsigned column) { ASSERT_TRUE(mlir::isa<mlir::FileLineColLoc>(loc)); auto file_line_col_loc = mlir::cast<mlir::FileLineColLoc>(loc); EXPECT_EQ(file_line_col_loc.getFilename(), "test.cc"); EXPECT_EQ(file_line_col_loc.getLine(), line); EXPECT_EQ(file_line_col_loc.getColumn(), column); } TEST(DTensorLocationTest, HandlesEmptyLocation) { mlir::MLIRContext ctx; mlir::Location loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20); loc = tensorflow::dtensor::DTensorLocation(loc, "test.cc", 21); ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(loc); CheckFileLineColLocation(callsite_loc.getCallee(), 21, 0); CheckFileLineColLocation(callsite_loc.getCaller(), 10, 20); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(loc), stack); } TEST(DTensorLocationTest, HandlesMultipleCalls) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 22); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 23); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 24); auto verify_loc = test_loc; for (int i = 0; i < 4; ++i) { ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(verify_loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(verify_loc); auto callee_loc = callsite_loc.getCallee(); CheckFileLineColLocation(callee_loc, 24 - i, 0); verify_loc = callsite_loc.getCaller(); } CheckFileLineColLocation(verify_loc, 10, 20); 
constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0 >> test.cc:22:0 >> test.cc:23:0 >> test.cc:24:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } TEST(DTensorLocationTest, HandlesNameLoc) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"), mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20)); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21); ASSERT_EQ(mlir::GetNameFromLoc(test_loc), "op"); ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(test_loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(test_loc); mlir::Location caller_loc = mlir::cast<mlir::CallSiteLoc>(test_loc).getCaller(); ASSERT_TRUE(mlir::isa<mlir::NameLoc>(caller_loc)); CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(caller_loc).getChildLoc(), 10, 20); mlir::Location callee_loc = callsite_loc.getCallee(); ASSERT_TRUE(mlir::isa<mlir::NameLoc>(callee_loc)); CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(callee_loc).getChildLoc(), 21, 0); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } TEST(DTensorLocationTest, HandlesNameLocWithName) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"), mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20)); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21, "nested"); EXPECT_EQ(mlir::GetNameFromLoc(test_loc), "op/nested"); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/dtensor_location.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/dtensor_location_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f0deab1d-9d80-452e-9d1c-afc5405877d4
cpp
tensorflow/tensorflow
tensor_layout
tensorflow/dtensor/cc/tensor_layout.cc
tensorflow/dtensor/tests/tensor_layout_test.cc
#include "tensorflow/dtensor/cc/tensor_layout.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <map> #include <memory> #include <numeric> #include <set> #include <string> #include <utility> #include <vector> #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "mlir/IR/BuiltinTypes.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/math/math_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/proto/layout.pb.h" namespace tensorflow { namespace dtensor { constexpr const char* Layout::kUnshardedDim; constexpr const char* Layout::kAny; constexpr const char* Layout::kEmptyLayoutString; constexpr const char* Layout::kMatch; constexpr const char* Mesh::kEmptyMeshString; constexpr const char* Mesh::kUseXLASPMDString; constexpr bool Mesh::kUseXLASPMD; namespace { ShardVector ExpandShardVector(const ShardVector& shard_vec, const std::vector<int>& new_num_shards_per_dim) { if (shard_vec.shards.empty()) return shard_vec; auto ExpandShard = [shard_vec, new_num_shards_per_dim]( const Shard& shard, int dim_ind) -> std::vector<Shard> { int original_dim_size = shard_vec.num_shards_per_dim[dim_ind]; int new_dim_size = new_num_shards_per_dim[dim_ind]; int size_ratio = new_dim_size / original_dim_size; std::vector<Shard> expanded_shards; expanded_shards.reserve(size_ratio); for (int i = 0; i < size_ratio; ++i) { int original_coord = shard[dim_ind]; int shifted_coord = (original_coord - 1) * size_ratio + 1 + i; Shard new_shard = shard; new_shard[dim_ind] = shifted_coord; 
expanded_shards.push_back(new_shard); } return expanded_shards; }; std::vector<Shard> total_expanded_shards = shard_vec.shards; for (int dim_ind = 0; dim_ind < new_num_shards_per_dim.size(); ++dim_ind) { std::vector<Shard> dim_expanded_shards; for (const auto& shard : total_expanded_shards) { std::vector<Shard> expanded_shards = ExpandShard(shard, dim_ind); dim_expanded_shards.insert(dim_expanded_shards.end(), expanded_shards.begin(), expanded_shards.end()); } total_expanded_shards = dim_expanded_shards; } std::sort(total_expanded_shards.begin(), total_expanded_shards.end()); ShardVector expanded_shard_vec; expanded_shard_vec.shards = total_expanded_shards; expanded_shard_vec.num_shards_per_dim = new_num_shards_per_dim; return expanded_shard_vec; } } std::vector<DeviceLocation> ComputeDeviceLocations(const Mesh& mesh) { std::vector<DeviceLocation> mesh_locs(mesh.size()); for (size_t i = 0; i < mesh.size(); ++i) mesh_locs[i] = *(mesh.device_location(i)); return mesh_locs; } bool ShardVector::operator==(const ShardVector& other) const { if (this->shards.empty() && other.shards.empty()) return true; if (this->shards.empty() || other.shards.empty()) return false; if (this->num_shards_per_dim.size() != other.num_shards_per_dim.size()) return false; Shard first_shard_this = this->shards[0]; Shard first_shard_other = other.shards[0]; std::vector<int> new_sizes; for (size_t i = 0; i < first_shard_this.size(); ++i) { int lcm = this->num_shards_per_dim[i] * other.num_shards_per_dim[i] / MathUtil::GCD(static_cast<unsigned>(this->num_shards_per_dim[i]), static_cast<unsigned>(other.num_shards_per_dim[i])); new_sizes.push_back(lcm); } return ExpandShardVector(*this, new_sizes).shards == ExpandShardVector(other, new_sizes).shards; } std::string ShardVector::ToString() const { std::string string = "shards:["; std::vector<std::string> shard_strs; shard_strs.reserve(shards.size()); for (const Shard& shard : shards) shard_strs.push_back("(" + absl::StrJoin(shard, ",") + ")"); 
absl::StrAppend(&string, absl::StrJoin(shard_strs, ",")); absl::StrAppend(&string, "] num_shards_per_dim:("); absl::StrAppend(&string, absl::StrJoin(num_shards_per_dim, ",") + ")"); return string; } bool ShardVector::ContainsShard(const Shard& shard) const { for (const auto& shard_in_vec : shards) if (shard_in_vec == shard) return true; return false; } bool IsDynamicSize(int64_t size) { return mlir::ShapedType::isDynamic(size) || size == -1; } bool IsDynamicShape(absl::Span<const int64_t> shape) { for (int64_t size : shape) { if (IsDynamicSize(size)) return true; } return false; } std::map<std::string, std::vector<int>>& Mesh::tpu_core_ids() { static auto tpu_core_ids = new std::map<std::string, std::vector<int>>(); return *tpu_core_ids; } std::string& Mesh::tpu_host_mesh() { static auto tpu_host_mesh = new std::string; return *tpu_host_mesh; } StatusOr<Mesh> Mesh::ParseFromProto(const MeshProto& proto) { Mesh mesh; mesh.name_ = proto.name(); mesh.use_xla_spmd_ = proto.use_xla_spmd(); if (proto.single_device().empty()) { mesh.mesh_type_ = MeshType::kTile; for (const auto& device : proto.local_devices()) { mesh.local_devices_.push_back(device); } for (const auto& device_id : proto.local_device_ids()) { mesh.local_device_ids_.push_back(device_id); } for (const auto& device_id : proto.global_device_ids()) { mesh.global_device_ids_.push_back(device_id); } for (const auto& device : proto.global_devices()) { mesh.global_devices_.push_back(device); } mesh.mesh_dims_.resize(proto.mesh_dimensions_size()); for (int i = 0; i < proto.mesh_dimensions_size(); ++i) { const MeshDimensionProto& dim = proto.mesh_dimensions(i); mesh.mesh_dims_[i].name = dim.name(); mesh.mesh_dims_[i].size = dim.size(); } int64 mesh_size = mesh.size(); int num_devices = proto.global_device_ids_size(); if (mesh_size > 0 && mesh_size != num_devices) { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Number of devices ", num_devices, " not matching mesh size ", mesh_size))); } } else { 
mesh.mesh_type_ = MeshType::kSingleDevice; mesh.single_device_ = proto.single_device(); } return mesh; } StatusOr<Mesh> Mesh::GetAbstractMesh( const std::string& name, const std::vector<MeshDimension>& mesh_dims) { Mesh mesh; mesh.mesh_type_ = MeshType::kTile; mesh.name_ = name; mesh.mesh_dims_ = mesh_dims; std::set<std::string> dims_set; for (const MeshDimension& dim : mesh.dims()) { if (dims_set.find(dim.name) != dims_set.end()) TF_RETURN_WITH_CONTEXT( absl::InvalidArgumentError("repeated mesh dimension")); if (dim.name == Layout::kAny || dim.name == Layout::kMatch || dim.name == Layout::kUnshardedDim) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("mesh dimension name ", dim.name, " is reserved"))); dims_set.insert(dim.name); } return mesh; } StatusOr<Mesh> Mesh::GetMesh(const std::string& name, const std::vector<MeshDimension>& mesh_dims, const std::vector<std::int64_t>& global_device_ids, const std::vector<std::int64_t>& local_device_ids, const std::vector<std::string>& local_devices, const std::vector<std::string>& global_devices, bool use_xla_spmd) { TF_ASSIGN_OR_RETURN(Mesh mesh, GetAbstractMesh(name, mesh_dims)); mesh.global_device_ids_ = global_device_ids; mesh.local_device_ids_ = local_device_ids; mesh.local_devices_ = local_devices; mesh.global_devices_ = global_devices; mesh.use_xla_spmd_ = use_xla_spmd; size_t global_n = mesh.global_device_ids_.size(); size_t local_n = mesh.local_device_ids_.size(); size_t dev_n = mesh.local_devices_.size(); if (!(global_n >= local_n && dev_n == local_n)) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "number of global_device_ids ", std::to_string(global_n), " local_devices ids ", std::to_string(local_n), " and local devices ", std::to_string(dev_n), "not meeting requirements"))); if (global_n == 0) return Mesh::Empty(); if (local_n && !(global_n % local_n == 0)) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "Uneven local clusters with global_ids ", 
std::to_string(global_n), " and local_devices ids ", std::to_string(local_n)))); if (mesh.size() != global_n) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( "mesh size doesn't match number of devices")); TF_ASSIGN_OR_RETURN(const auto& parsed_devs, mesh.ParsedDevices()); std::set<std::string> types_set; for (const DeviceNameUtils::ParsedName& dev : parsed_devs) { if (!dev.has_job || !dev.has_task || !dev.has_type) return absl::InvalidArgumentError( "Failed to either identify host or device type"); types_set.insert(dev.type); if (types_set.size() > 1) return absl::InvalidArgumentError(absl::StrCat( "More than one device type per mesh not supported. Found ", types_set.size())); } return mesh; } StatusOr<Mesh> Mesh::GetSingleDeviceMesh(absl::string_view single_device) { if (single_device.empty()) { return absl::InvalidArgumentError("Single device is empty."); } Mesh mesh; mesh.mesh_type_ = MeshType::kSingleDevice; mesh.single_device_ = single_device; return mesh; } StatusOr<int64_t> Mesh::dim_size(absl::string_view name) const { for (const auto& mesh_dim : dims()) { if (name == mesh_dim.name) { return mesh_dim.size; } } std::vector<std::string> dim_names; for (const auto& mesh_dim : dims()) dim_names.push_back(mesh_dim.name); return absl::NotFoundError( absl::StrCat("Dimension ", name, " does not exist in mesh.", "Available dimensions: ", absl::StrJoin(dim_names, ","))); } std::vector<int64_t> Mesh::dim_sizes() const { std::vector<int64_t> dim_sizes; if (mesh_dims_.empty()) return dim_sizes; for (const auto& mesh_dim : mesh_dims_) dim_sizes.push_back(mesh_dim.size); return dim_sizes; } bool Mesh::operator==(const Mesh& b) const { StatusOr<MeshProto> this_proto = ToProto(); StatusOr<MeshProto> b_proto = b.ToProto(); if (!this_proto.ok() || !b_proto.ok()) { return false; } return protobuf::util::MessageDifferencer::Equals(*this_proto, *b_proto); } bool Mesh::IsEmpty() const { return mesh_type_ == MeshType::kTile && global_device_ids_.empty(); } StatusOr<const 
std::vector<DeviceNameUtils::ParsedName>> Mesh::ParsedDevices() const { std::vector<DeviceNameUtils::ParsedName> parsed_devices( local_devices_.size()); for (std::size_t i = 0; i < local_devices_.size(); ++i) if (!DeviceNameUtils::ParseFullOrLocalName( absl::string_view(local_devices_[i]), &parsed_devices[i])) return absl::InvalidArgumentError("Failed to parse local_devices"); return parsed_devices; } StatusOr<Mesh> Mesh::ToDeviceType(const std::string& device_type) const { std::vector<std::string> to_local_devices; DeviceNameUtils::ParsedName parsed_dev; for (const std::string& local_dev : local_devices_) { if (!DeviceNameUtils::ParseFullOrLocalName(absl::string_view(local_dev), &parsed_dev)) { return absl::InvalidArgumentError("Failed to parse local devices"); } to_local_devices.push_back( DeviceNameUtils::FullName(parsed_dev.job, parsed_dev.replica, parsed_dev.task, device_type, parsed_dev.id)); parsed_dev.Clear(); } return GetMesh("", mesh_dims_, global_device_ids_, local_device_ids_, to_local_devices, {}); } namespace { std::string HostFromParsedDev(const DeviceNameUtils::ParsedName& dev) { return "/job:" + dev.job + "/task:" + std::to_string(dev.task); } } std::vector<std::string> Mesh::hosts() const { std::vector<std::string> host_list; if (IsEmpty()) return host_list; const auto parsed_devices = ParsedDevices().value(); for (const DeviceNameUtils::ParsedName& dev : parsed_devices) { std::string host = HostFromParsedDev(dev); if (std::find(host_list.begin(), host_list.end(), host) == host_list.end()) host_list.push_back(host); } return host_list; } std::string Mesh::device_type() const { if (IsEmpty()) return std::string(); std::string device; if (IsSingleDevice()) { device = single_device_; } else if (!global_devices_.empty()) { device = global_devices_[0]; } else { device = local_devices_[0]; } DeviceNameUtils::ParsedName dev; DeviceNameUtils::ParseFullOrLocalName(device, &dev); return dev.type; } bool Mesh::IsMeshDim(const std::string& dim_name) const { 
for (const auto& mesh_dim : dims()) if (dim_name == mesh_dim.name) return true; return false; } std::vector<std::string> Mesh::MeshDimNames() const { std::vector<std::string> mesh_names; for (const auto& mesh_dim : dims()) mesh_names.push_back(mesh_dim.name); return mesh_names; } int Mesh::GetMeshDimIndexWithName(const std::string& mesh_name) const { int mesh_index = -1; for (int i = 0; i < dims().size(); ++i) { const auto mesh_dim = dim(i); if (mesh_dim.name == mesh_name) mesh_index = i; } assert(mesh_index >= 0); return mesh_index; } int64 Mesh::rank() const { return mesh_dims_.size(); } int64 Mesh::size() const { if (mesh_dims_.empty()) return 0; int64 size = 1; for (const MeshDimension& dim : mesh_dims_) size *= dim.size; return size; } Mesh Mesh::Empty() { return Mesh(); } StatusOr<MeshProto> Mesh::ToProto() const { MeshProto mesh_proto; mesh_proto.set_name(name()); mesh_proto.set_use_xla_spmd(use_xla_spmd()); switch (mesh_type_) { case MeshType::kTile: { for (const auto& d : local_devices_) { mesh_proto.add_local_devices(d); } for (const auto& i : local_device_ids_) { mesh_proto.add_local_device_ids(i); } for (const auto& i : global_device_ids_) { mesh_proto.add_global_device_ids(i); } auto& mesh_dimensions = *mesh_proto.mutable_mesh_dimensions(); mesh_dimensions.Reserve(mesh_dims_.size()); for (const auto& dim : mesh_dims_) { MeshDimensionProto* mesh_dim_proto = mesh_dimensions.Add(); mesh_dim_proto->set_name(dim.name); mesh_dim_proto->set_size(dim.size); } for (const auto& d : global_devices_) { mesh_proto.add_global_devices(d); } break; } case MeshType::kSingleDevice: { *mesh_proto.mutable_single_device() = single_device_; break; } default: { return absl::InvalidArgumentError( absl::StrCat("Unsupported mesh type ", static_cast<int>(mesh_type_))); } } return mesh_proto; } std::string Mesh::ToString() const { if (Mesh::IsEmpty()) { return kEmptyMeshString; } if (mesh_type_ == MeshType::kSingleDevice) { return single_device_; } std::string mesh_str = 
absl::StrCat(Mesh::name(), "|"); absl::InlinedVector<std::string, 4> mesh_dim_lst; mesh_dim_lst.reserve(mesh_dims_.size()); for (const auto& dim : mesh_dims_) mesh_dim_lst.push_back(absl::StrCat(dim.name, "=", dim.size)); mesh_str += absl::StrJoin(mesh_dim_lst, ",") + "|"; mesh_str += absl::StrJoin(global_device_ids_, ",") + "|"; mesh_str += absl::StrJoin(local_device_ids_, ",") + "|"; mesh_str += absl::StrJoin(local_devices_, ","); if (!global_devices_.empty()) { mesh_str += "|"; mesh_str += absl::StrJoin(global_devices_, ","); } if (use_xla_spmd()) { mesh_str += "|"; mesh_str += Mesh::kUseXLASPMDString; } return mesh_str; } uint64 Mesh::GlobalFingerprint() const { if (Mesh::IsEmpty()) return Fingerprint64(kEmptyMeshString); std::string mesh_str; absl::InlinedVector<std::string, 4> mesh_dim_lst; mesh_dim_lst.reserve(mesh_dims_.size()); for (const auto& dim : mesh_dims_) mesh_dim_lst.push_back(absl::StrCat(dim.name, "=", dim.size)); mesh_str += absl::StrJoin(mesh_dim_lst, ",") + "|"; mesh_str += absl::StrJoin(global_device_ids_, ",") + "|"; if (!global_devices_.empty()) { mesh_str += "|"; mesh_str += absl::StrJoin(global_devices_, ","); } return Fingerprint64(mesh_str); } namespace { MeshDimension StrToMeshDimension(const std::string& str) { MeshDimension mesh_dim; if (str.empty()) return mesh_dim; std::vector<std::string> mesh_dim_parts = absl::StrSplit(str, '='); mesh_dim.name = mesh_dim_parts[0]; mesh_dim.size = std::stoi(mesh_dim_parts[1]); return mesh_dim; } StatusOr<Mesh> GenerateMeshDevicesForTests( const std::string& name, const std::vector<MeshDimension>& mesh_dims, const std::string& mesh_gen_instruction) { std::vector<std::string> instruction_parts = absl::StrSplit(mesh_gen_instruction, '*'); if (instruction_parts.size() != 2) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Expected a * in mesh_gen_instructions but found ", mesh_gen_instruction))); std::string device_type = instruction_parts[1]; int64 mesh_size = 0; if 
(!mesh_dims.empty()) { mesh_size = 1; for (const MeshDimension& mesh_dim : mesh_dims) mesh_size *= mesh_dim.size; } std::vector<int64_t> global_device_ids; std::vector<int64_t> local_device_ids; std::vector<std::string> local_devices; for (std::size_t i = 0; i < mesh_size; ++i) { global_device_ids.push_back(i); local_device_ids.push_back(i); local_devices.push_back("/job:localhost/task:0/device:" + device_type + ":" + std::to_string(i)); } TF_ASSIGN_OR_RETURN( Mesh mesh, Mesh::GetMesh(name, mesh_dims, global_device_ids, local_device_ids, local_devices, {})); return mesh; } } StatusOr<Mesh> Mesh::FromString(absl::string_view str) { if (str == kEmptyMeshString) return Mesh::Empty(); std::vector<std::string> mesh_parts = absl::StrSplit(str, '|'); if (mesh_parts.size() == 1) { std::vector<std::string> single_device_parts = absl::StrSplit(mesh_parts[0], ':'); if (single_device_parts.size() != 5 && single_device_parts.size() != 6) { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Input string is invalid: ", mesh_parts[0]))); } Mesh mesh; mesh.mesh_type_ = MeshType::kSingleDevice; mesh.single_device_ = str; return mesh; } if (mesh_parts.size() != 3 && mesh_parts.size() != 5 && mesh_parts.size() != 6 && mesh_parts.size() != 7) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Expected either 5, 6, 7 or 3 mesh parts but found", mesh_parts.size()))); std::string name = mesh_parts[0]; std::vector<MeshDimension> mesh_dims; if (!mesh_parts[1].empty()) { std::vector<std::string> mesh_dim_strs = absl::StrSplit(mesh_parts[1], ','); mesh_dims.reserve(mesh_dim_strs.size()); for (const std::string& mesh_dim_str : mesh_dim_strs) mesh_dims.push_back(StrToMeshDimension(mesh_dim_str)); } if (mesh_parts.size() == 3) return GenerateMeshDevicesForTests(name, mesh_dims, mesh_parts[2]); std::vector<int64_t> global_device_ids; if (!mesh_parts[2].empty()) { std::vector<std::string> global_device_ids_strs = absl::StrSplit(mesh_parts[2], ','); 
global_device_ids.reserve(global_device_ids_strs.size()); for (const std::string& id : global_device_ids_strs) global_device_ids.push_back(std::stoi(id)); } std::vector<int64_t> local_device_ids; if (!mesh_parts[3].empty()) { std::vector<std::string> local_device_ids_strs = absl::StrSplit(mesh_parts[3], ','); local_device_ids.reserve(local_device_ids_strs.size()); for (const std::string& id : local_device_ids_strs) local_device_ids.push_back(std::stoi(id)); } std::vector<std::string> local_devices; if (!mesh_parts[4].empty()) local_devices = absl::StrSplit(mesh_parts[4], ','); bool use_xla_spmd = Mesh::kUseXLASPMD; std::vector<std::string> global_devices; if (mesh_parts.size() == 6 && !mesh_parts[5].empty()) { if (mesh_parts[5] == Mesh::kUseXLASPMDString) { use_xla_spmd = true; } else { global_devices = absl::StrSplit(mesh_parts[5], ','); } } if (mesh_parts.size() == 7 && !mesh_parts[6].empty()) { if (mesh_parts[6] == Mesh::kUseXLASPMDString) { use_xla_spmd = true; } else { return absl::InvalidArgumentError( absl::StrCat("Expected string ", Mesh::kUseXLASPMDString, "as the 7th argument but got: ", mesh_parts[6])); } } TF_ASSIGN_OR_RETURN( Mesh mesh, Mesh::GetMesh(name, mesh_dims, global_device_ids, local_device_ids, local_devices, global_devices, use_xla_spmd)); return mesh; } int64 Mesh::num_devices() const { return global_device_ids_.size(); } StatusOr<const DeviceLocation> Mesh::device_location(int offset) const { if (offset < 0 || offset > size() - 1) return absl::InvalidArgumentError(absl::StrCat( "Mesh offset cannot be negative or exceed Mesh's size. 
Offset size:", offset, " and Mesh size:", size())); DeviceLocation dev_loc; std::vector<int64> mesh_dim_lengths = dim_sizes(); int64 i = mesh_dim_lengths.size() - 1; while (i >= 0) { dev_loc.insert(dev_loc.begin(), offset % mesh_dim_lengths[i]); offset /= mesh_dim_lengths[i]; --i; } return dev_loc; } int64 Mesh::GetFlattenedCoordinate(const DeviceLocation& loc) const { const std::vector<int64> mesh_dim_sizes = dim_sizes(); int64 i = mesh_dim_sizes.size() - 1; int64 acc = 1; int64 device_pos = 0; while (i >= 0) { device_pos += loc[i] * acc; acc *= mesh_dim_sizes[i]; --i; } return device_pos; } StatusOr<int32> Mesh::idx_for_dim(absl::string_view dim_name) const { for (int i = 0; i < mesh_dims_.size(); ++i) { if (mesh_dims_[i].name == dim_name) return i; } return absl::InvalidArgumentError(absl::StrCat( "dim name :", dim_name, " does not exist on mesh : ", ToString())); } Mesh Mesh::CreateMesh(const std::string& mesh_name, const std::vector<std::string>& dim_names, const std::vector<std::int64_t>& mesh_shape, const std::vector<std::int64_t>& global_device_ids, const std::vector<std::string>& global_devices_str, const std::vector<std::int64_t>& local_device_ids, const std::vector<std::string>& local_devices_str, const bool use_xla_spmd) { Mesh mesh; mesh.mesh_type_ = MeshType::kTile; mesh.name_ = mesh_name; mesh.use_xla_spmd_ = use_xla_spmd; mesh.mesh_dims_.resize(dim_names.size()); for (int i = 0; i < dim_names.size(); ++i) { mesh.mesh_dims_[i].name = dim_names[i]; mesh.mesh_dims_[i].size = mesh_shape[i]; } for (const auto& id : global_device_ids) { mesh.global_device_ids_.push_back(id); } for (const auto& d : global_devices_str) { mesh.global_devices_.push_back(d); } for (const auto& id : local_device_ids) { mesh.local_device_ids_.push_back(id); } for (const auto& d : local_devices_str) { mesh.local_devices_.push_back(d); } return mesh; } StatusOr<Layout> Layout::GetLayout( LayoutType type, const std::vector<std::string>& sharding_specs, const Mesh& mesh) { Layout 
layout; layout.type_ = type; layout.mesh_ = mesh; if ((type == LayoutType::kSingleDevice) != mesh.IsSingleDevice()) { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( "type is single device, but mesh is not single device")); } if ((type == LayoutType::kEmpty) != mesh.IsEmpty()) { TF_RETURN_WITH_CONTEXT( absl::InvalidArgumentError("type is empty, but mesh is not empty")); } for (const auto& sharding_spec : sharding_specs) { if (!(sharding_spec == kUnshardedDim || sharding_spec == kAny || sharding_spec == kMatch || mesh.IsMeshDim(sharding_spec) || sharding_spec == "scalar")) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("sharding spec (", sharding_spec, ") refers to mesh dimension not contained in mesh ", mesh.ToString()))); } std::set<std::string> dims_set; for (const auto& sharding_spec : sharding_specs) { if (sharding_spec == kUnshardedDim || sharding_spec == kAny) continue; if (sharding_spec == "scalar") { if (sharding_specs.size() > 1) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "A scalar sharding_spec can only be used as a single sharding_spec " "instruction, not as part of list of sharding_specs as attempted " "here with ", sharding_specs.size(), " sharding_specs"))) return layout; } if (dims_set.find(sharding_spec) != dims_set.end()) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Attempted to shard two or more tensor " "dimensions over mesh dimension ", sharding_spec))); dims_set.insert(sharding_spec); } layout.sharding_specs_ = sharding_specs; return layout; } Layout Layout::Empty() { Layout result; result.type_ = LayoutType::kEmpty; return result; } bool Layout::IsEmpty() const { return type_ == LayoutType::kEmpty; } namespace { Mesh ReducedAbstractMesh(const Layout* layout) { const std::vector<std::string> shard_spec_strs = layout->sharding_spec_strs(); std::vector<MeshDimension> reduced_mesh_dims; reduced_mesh_dims.reserve(layout->mesh().dims().size()); for (const MeshDimension& mesh_dim : 
layout->mesh().dims()) { bool IsMeshDimInShardingSpecs = std::find(shard_spec_strs.begin(), shard_spec_strs.end(), mesh_dim.name) != shard_spec_strs.end(); MeshDimension reduced_dim = IsMeshDimInShardingSpecs ? mesh_dim : MeshDimension(mesh_dim.name, 1); reduced_mesh_dims.push_back(reduced_dim); } return Mesh::GetAbstractMesh("", reduced_mesh_dims).value(); } } Mesh Layout::ReducedMesh() const { Mesh reduced_mesh = ReducedAbstractMesh(this); std::vector<int64_t> reduced_global_device_ids; std::vector<std::string> reduced_global_devs; for (const DeviceLocation& loc : ComputeDeviceLocations(reduced_mesh)) { int64 pos = mesh().GetFlattenedCoordinate(loc); reduced_global_device_ids.push_back(mesh().global_device_ids().at(pos)); if (!mesh().global_devices().empty()) { reduced_global_devs.push_back(mesh().global_devices().at(pos)); } } std::set<int64_t> reduced_global_device_ids_set( reduced_global_device_ids.begin(), reduced_global_device_ids.end()); std::vector<int64_t> reduced_local_device_ids; std::vector<std::string> reduced_local_devs; for (size_t i = 0; i < mesh().local_device_ids().size(); ++i) { int64_t device_id = mesh().local_device_ids().at(i); if (reduced_global_device_ids_set.find(device_id) != reduced_global_device_ids_set.end()) { reduced_local_device_ids.push_back(device_id); reduced_local_devs.push_back(mesh().local_devices().at(i)); } } return Mesh::GetMesh(reduced_mesh.name(), reduced_mesh.dims(), reduced_global_device_ids, reduced_local_device_ids, reduced_local_devs, reduced_global_devs) .value(); } namespace { Layout ReducedLayout(const Layout* layout) { return Layout::GetLayout(layout->sharding_spec_strs(), layout->ReducedMesh()) .value(); } StatusOr<int> IndexOfMeshDimension(const Mesh& mesh, const std::string& dim_name) { for (size_t i = 0; i < mesh.dims().size(); ++i) if (dim_name == mesh.dims()[i].name) return i; return absl::InvalidArgumentError("Mesh dimension not found"); } } ShardVector Layout::GetShardVector() const { auto 
GetShardFromDeviceLocation = [&](const DeviceLocation& loc) -> Shard { Shard shard; for (size_t i = 0; i < sharding_specs_.size(); ++i) { std::string spec = sharding_specs_[i]; if (spec == Layout::kUnshardedDim) { shard.push_back(1); } else { int mesh_index = IndexOfMeshDimension(mesh(), sharding_spec(i)).value(); int shard_number = loc[mesh_index] + 1; shard.push_back(shard_number); } } return shard; }; auto ShardVectorDims = [&]() -> std::vector<int> { std::vector<int> num_shards_per_dim(sharding_specs_.size()); for (size_t i = 0; i < sharding_specs_.size(); ++i) { std::string spec = sharding_specs_[i]; if (Layout::IsShardedDimension(spec)) { StatusOr<int64> dim_size = mesh().dim_size(spec); num_shards_per_dim[i] = dim_size.value(); } else { num_shards_per_dim[i] = 1; } } return num_shards_per_dim; }; ShardVector shard_vec; for (const DeviceLocation& mesh_loc : ComputeDeviceLocations(mesh())) shard_vec.shards.push_back(GetShardFromDeviceLocation(mesh_loc)); shard_vec.num_shards_per_dim = ShardVectorDims(); return shard_vec; } std::map<std::string, ShardVector> Layout::HostShardMap() const { Layout reduced_layout = ReducedLayout(this); Mesh reduced_mesh = reduced_layout.mesh(); using HostName = std::string; std::map<HostName, ShardVector> host_shards_map; ShardVector shard_vec_in_red_layout = reduced_layout.GetShardVector(); const auto parsed_devs = reduced_mesh.ParsedDevices().value(); for (size_t i = 0; i < parsed_devs.size(); ++i) { HostName host = HostFromParsedDev(parsed_devs[i]); Shard shard_in_device = shard_vec_in_red_layout.shards[i]; auto it = host_shards_map.find(host); if (it == host_shards_map.end()) { ShardVector shard_vec_in_host; shard_vec_in_host.shards.push_back(shard_in_device); shard_vec_in_host.num_shards_per_dim = shard_vec_in_red_layout.num_shards_per_dim; host_shards_map.insert( std::pair<HostName, ShardVector>(host, shard_vec_in_host)); } else { bool isShardInShardVector = it->second.ContainsShard(shard_in_device); if 
(!isShardInShardVector) { it->second.shards.push_back(shard_in_device); } } } for (auto it = host_shards_map.begin(); it != host_shards_map.end(); ++it) { std::sort(it->second.shards.begin(), it->second.shards.end()); } return host_shards_map; } const std::string& Layout::sharding_spec(int idx) const { return sharding_specs_[idx]; } std::vector<int32> Layout::num_shards() const { std::vector<int32> num_shards; num_shards.reserve(sharding_specs_.size()); for (int64_t index = 0; index < sharding_specs_.size(); ++index) { num_shards.push_back(num_shards_for_dim(index)); } return num_shards; } size_t Layout::num_shards_for_dim(int dim) const { const std::string spec = sharding_specs_[dim]; if (spec == Layout::kUnshardedDim) return 1; if (spec == Layout::kMatch) return -1; return mesh().dim_size(spec).value(); } bool Layout::IsFullyReplicated() const { if (!mesh_.IsTile()) { return false; } for (const auto& sharding_spec : sharding_specs_) { if (sharding_spec != Layout::kUnshardedDim) return false; } return true; } bool Layout::IsLastDimReplicated() const { return (mesh_.IsTile() && ((sharding_specs_.empty()) || (sharding_specs_.back() == Layout::kUnshardedDim))); } bool Layout::IsBatchParallel() const { if (!mesh_.IsTile()) { return false; } if (sharding_specs_.empty()) { return false; } for (int i = 1; i < sharding_specs_.size(); ++i) { const auto& spec = sharding_specs_[i]; if (spec != Layout::kUnshardedDim) { return false; } } return sharding_specs_[0] != Layout::kUnshardedDim; } bool Layout::IsBatchParallel(int non_batch_rank) const { if (!mesh_.IsTile()) { return false; } if (sharding_specs_.empty()) return true; for (int i = rank() - non_batch_rank; i < rank(); ++i) { if (num_shards_for_dim(i) != 1) return false; } return true; } StatusOr<LayoutProto> Layout::ToProto() const { LayoutProto proto; TF_ASSIGN_OR_RETURN(*proto.mutable_mesh_config(), mesh_.ToProto()); for (const auto& spec : sharding_specs_) { proto.add_sharding_specs()->set_sharding_spec(spec); } if 
(IsEmpty()) { proto.set_type(LayoutProto::UNKNOWN); } else { switch (type_) { case LayoutType::kSingleDevice: proto.set_type(LayoutProto::SINGLE_DEVICE); break; case LayoutType::kStatic: proto.set_type(LayoutProto::STATIC); break; case LayoutType::kParted: proto.set_type(LayoutProto::PARTED); break; default: proto.set_type(LayoutProto::UNKNOWN); break; } } return proto; } bool Layout::IsEquivalent(const Layout& b) const { if (this->type() != b.type()) return false; return IsEquivalentIgnoringType(b); } bool Layout::IsEquivalentIgnoringType(const Layout& b) const { if (this->rank() != b.rank()) return false; if (this->mesh() != b.mesh()) return false; for (int i = 0; i < this->rank(); ++i) { if (this->sharding_specs_[i] != b.sharding_specs_[i]) { if ((this->num_shards_for_dim(i) != 1) || (b.num_shards_for_dim(i) != 1)) return false; } } return true; } bool Layout::operator==(const Layout& b) const { StatusOr<LayoutProto> this_proto = ToProto(); StatusOr<LayoutProto> b_proto = b.ToProto(); if (!this_proto.ok() || !b_proto.ok()) { return false; } return protobuf::util::MessageDifferencer::Equals(*this_proto, *b_proto); } std::vector<int64_t> Layout::GlobalShapeFromLocalShape( absl::Span<const int64_t> local_shape, const std::vector<std::vector<int64_t>>* local_shapes) const { if (IsSingleDevice() || IsFullyReplicated()) { if (IsDynamicShape(local_shape) && local_shapes) { return local_shapes->at(0); } else { return std::vector<int64_t>(local_shape.begin(), local_shape.end()); } } std::vector<int64_t> stride_for_dim; stride_for_dim.resize(sharding_specs_.size()); size_t stride = mesh().num_local_devices(); for (int i = 0; i < stride_for_dim.size(); i++) { stride = stride / num_shards_for_dim(i); stride_for_dim[i] = stride; } auto dimension_size = [&, this](int dim) -> int64_t { int64_t local_size = local_shape[dim]; if (IsDynamicSize(local_size) && local_shapes && local_shapes->size() == mesh().num_local_devices()) { int64_t dim_size = 0; int index = 0; for (int i = 0; 
i < num_shards_for_dim(dim); i++) { dim_size += local_shapes->at(index)[dim]; index += stride_for_dim[dim]; } return dim_size; } else { return local_size * num_shards_for_dim(dim); } }; std::vector<int64_t> global_shape; global_shape.reserve(sharding_specs_.size()); for (int i = 0; i < sharding_specs_.size(); ++i) { global_shape.push_back(dimension_size(i)); } return global_shape; } std::vector<int64_t> Layout::LocalShapeFromGlobalShape( absl::Span<const int64_t> global_shape) const { if (IsFullyReplicated()) { return std::vector<int64_t>(global_shape.begin(), global_shape.end()); } std::vector<int32> shards = num_shards(); std::vector<int64_t> local_shape; for (int i = 0; i < sharding_specs_.size(); ++i) { int64_t dim_shards = shards[i]; int64_t local_size = IsDynamicSize(global_shape[i]) ? global_shape[i] : global_shape[i] / dim_shards; local_shape.emplace_back(local_size); } return local_shape; } PartialTensorShape Layout::LocalShapeFromGlobalShape( const PartialTensorShape& global_shape) const { if (IsFullyReplicated() || global_shape.dims() == -1) { return global_shape; } std::vector<int32> shards = num_shards(); PartialTensorShape local_shape({}); for (int spec_index = 0; spec_index < sharding_specs_.size(); ++spec_index) { int64_t dim_size = global_shape.dim_size(spec_index); int64_t local_size = IsDynamicSize(dim_size) ? 
dim_size : dim_size / shards[spec_index]; local_shape.AddDim(local_size); } return local_shape; } StatusOr<Layout> Layout::FromProto(const LayoutProto& proto) { std::vector<std::string> sharding_specs; for (const auto& spec : proto.sharding_specs()) sharding_specs.push_back(spec.sharding_spec()); TF_ASSIGN_OR_RETURN(auto mesh, Mesh::ParseFromProto(proto.mesh_config())); LayoutType type; switch (proto.type()) { case LayoutProto::UNKNOWN: type = LayoutType::kEmpty; break; case LayoutProto::SINGLE_DEVICE: type = LayoutType::kSingleDevice; break; case LayoutProto::STATIC: type = LayoutType::kStatic; break; case LayoutProto::PARTED: type = LayoutType::kParted; break; default: return absl::InvalidArgumentError(absl::StrCat( "value for type field of layout protobuf is not supported.", proto.DebugString())); } return GetLayout(type, sharding_specs, mesh); } Layout Layout::ReplicatedOnMesh(const Mesh& mesh, int rank) { std::vector<std::string> specs(rank, kUnshardedDim); return Layout::GetLayout(specs, mesh).value(); } Layout Layout::ReplicatedLike(const Layout& layout) { std::vector<std::string> specs(layout.rank(), kUnshardedDim); return Layout::GetLayout(specs, layout.mesh()).value(); } Layout Layout::BatchShardedOnMesh(const Mesh& mesh, int rank, const string& mesh_dim, int axis) { std::vector<std::string> specs(rank, kUnshardedDim); specs[axis] = mesh_dim; return Layout::GetLayout(specs, mesh).value(); } Layout Layout::BatchShardedLike(const Layout& layout, const string& mesh_dim, int axis) { std::vector<std::string> specs(layout.rank(), kUnshardedDim); specs[axis] = mesh_dim; return Layout::GetLayout(specs, layout.mesh()).value(); } Layout Layout::AnyOnMesh(const Mesh& mesh, int rank) { std::vector<std::string> specs(rank, kAny); return Layout::GetLayout(specs, mesh).value(); } StatusOr<Layout> Layout::Transposed2D(const Layout& layout) { if (layout.rank() < 2) { return absl::InvalidArgumentError("Transposed2D requires rank to be >= 2"); } std::vector<std::string> 
transposed_specs = layout.sharding_spec_strs(); std::iter_swap(transposed_specs.end() - 2, transposed_specs.end() - 1); return Layout::GetLayout(transposed_specs, layout.mesh()).value(); } StatusOr<Layout> Layout::FromString(absl::string_view layout_str) { if (layout_str == kEmptyLayoutString) return Layout::Empty(); std::vector<absl::string_view> layout_parts = absl::StrSplit(layout_str, ' '); if (layout_parts.size() != 2) { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "Expected 2 items but found ", layout_parts.size(), layout_parts[0]))); } absl::string_view mesh_str = layout_parts[1]; absl::ConsumePrefix(&mesh_str, "mesh:"); TF_ASSIGN_OR_RETURN(Mesh mesh, Mesh::FromString(mesh_str)); LayoutType type = LayoutType::kStatic; absl::string_view sharding_spec_str = layout_parts[0]; if (absl::ConsumePrefix(&sharding_spec_str, kSingleDevicePrefix)) { type = LayoutType::kSingleDevice; } else if (absl::ConsumePrefix(&sharding_spec_str, kPartedPrefix)) { type = LayoutType::kParted; } else if (absl::ConsumePrefix(&sharding_spec_str, kStaticPrefix)) { type = LayoutType::kStatic; } else { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "Unknown layout type prefix", sharding_spec_str, " in ", layout_str))); } const bool has_sharding_spec = (type == LayoutType::kParted) || (type == LayoutType::kStatic); std::vector<std::string> sharding_spec_strs; if (has_sharding_spec) { sharding_spec_strs = absl::StrSplit(sharding_spec_str, ','); sharding_spec_strs.pop_back(); } return Layout::GetLayout(type, sharding_spec_strs, mesh); } std::vector<std::string> Layout::sharding_spec_strs() const { return sharding_specs_; } std::string Layout::ToString() const { std::string layout_str; switch (type_) { case LayoutType::kEmpty: return kEmptyLayoutString; case LayoutType::kSingleDevice: absl::StrAppend(&layout_str, kSingleDevicePrefix); absl::StrAppend(&layout_str, "true,"); break; case LayoutType::kStatic: absl::StrAppend(&layout_str, kStaticPrefix); break; 
case LayoutType::kParted: absl::StrAppend(&layout_str, kPartedPrefix); break; } for (const auto& dim_name : sharding_specs_) { absl::StrAppend(&layout_str, dim_name + ","); } absl::StrAppend(&layout_str, " mesh:", mesh_.ToString()); return layout_str; } StatusOr<Layout> Layout::GetLayoutWithReducedDims( const absl::flat_hash_set<int>& reduced_dims, bool keep_dims) const { std::vector<std::string> sharding_specs; for (int i = 0; i < rank(); ++i) { if (!reduced_dims.contains(i) && !reduced_dims.contains(i - rank())) { sharding_specs.push_back(sharding_spec(i)); } else if (keep_dims) { sharding_specs.push_back(kUnshardedDim); } } return Layout::GetLayout(sharding_specs, mesh()); } Layout Layout::Truncate(int64 split_point, bool end) const { if ((split_point == 0 && end) || (split_point == rank() && !end)) return *this; Layout output_layout(*this); auto& specs = output_layout.sharding_specs_; if (end) { specs.erase(specs.begin(), specs.begin() + split_point); } else { specs.resize(split_point); } return output_layout; } Layout Layout::LeftPad(int64_t rank) const { if (rank <= this->rank()) return *this; Layout output_layout(*this); auto& specs = output_layout.sharding_specs_; specs.insert(specs.begin(), rank - this->rank(), Layout::kUnshardedDim); return output_layout; } StatusOr<Layout> ConcatenateLayouts(const Layout& layout_a, const Layout& layout_b) { if (layout_a.mesh() != layout_b.mesh()) return absl::InvalidArgumentError( "unable to concatenate layouts as they are on different meshes."); absl::flat_hash_set<std::string> layout_a_mesh_dims; for (int i = 0; i < layout_a.rank(); ++i) if (layout_a.sharding_spec(i) != Layout::kUnshardedDim) layout_a_mesh_dims.emplace(layout_a.sharding_spec(i)); for (int i = 0; i < layout_b.rank(); ++i) if (layout_b.sharding_spec(i) != Layout::kUnshardedDim && layout_a_mesh_dims.contains(layout_b.sharding_spec(i))) return absl::InvalidArgumentError(absl::StrCat( "unable to concatenate layouts as they use the same meshes " "dimension: 
", layout_b.sharding_spec(i), " is used in both layouts.")); std::vector<std::string> sharding_specs; sharding_specs.reserve(layout_a.rank() + layout_b.rank()); for (int i = 0; i < layout_a.rank(); ++i) sharding_specs.push_back(layout_a.sharding_spec(i)); for (int i = 0; i < layout_b.rank(); ++i) sharding_specs.push_back(layout_b.sharding_spec(i)); return Layout::GetLayout(layout_a.type(), sharding_specs, layout_a.mesh()); } StatusOr<Layout> GetMostShardedLayout(const std::vector<Layout>& layouts) { if (layouts.empty()) return absl::InvalidArgumentError("Layout should not be empty"); absl::flat_hash_map<std::string, std::set<int>> layout_map; for (const Layout& layout : layouts) { for (int i = 0; i < layout.rank(); ++i) { const std::string& mesh_dim = layout.sharding_spec(i); if (mesh_dim == Layout::kUnshardedDim) continue; layout_map[mesh_dim].insert(i); } } for (auto& it : layout_map) if (it.second.size() > 1) it.second.clear(); std::map<int, std::set<std::string>> dim_to_layout_map; for (const auto& it : layout_map) { assert(it.second.size() <= 1); if (it.second.empty()) continue; const int tensor_dim_index = *it.second.begin(); dim_to_layout_map[tensor_dim_index].insert(it.first); } for (auto& it : dim_to_layout_map) if (it.second.size() > 1) it.second.clear(); std::vector<std::string> merged_spec; assert(!layouts.empty()); for (int i = 0; i < layouts[0].rank(); ++i) { const auto it = dim_to_layout_map.find(i); if (it != dim_to_layout_map.end() && !it->second.empty()) { assert(it->second.size() == 1); merged_spec.emplace_back(*it->second.begin()); } else { merged_spec.emplace_back(Layout::kUnshardedDim); } } return Layout::GetLayout(merged_spec, layouts[0].mesh()); } StatusOr<Layout> GetLeastShardedLayout(const std::vector<Layout>& layouts) { if (layouts.empty()) return absl::InvalidArgumentError("Layout should not be empty"); int rank = -1; std::vector<std::string> specs; for (const auto& layout : layouts) { if (rank == -1) { rank = layout.rank(); } else { if 
(rank != layout.rank()) { return absl::InvalidArgumentError( "Not all layouts to GetLeastShardedLayout are of the same rank."); } } } specs.resize(rank, Layout::kAny); for (const auto& layout : layouts) { const auto current_specs = layout.sharding_spec_strs(); for (int i = 0; i < rank; i++) { auto current_spec = current_specs[i]; if (specs[i] == Layout::kAny) { specs[i] = current_spec; } else if (specs[i] != current_spec) { specs[i] = Layout::kUnshardedDim; } } } return Layout::GetLayout(specs, layouts[0].mesh()); } } }
#include "tensorflow/dtensor/cc/tensor_layout.h" #include <map> #include <memory> #include <ostream> #include <string> #include <vector> #include <gmock/gmock.h> #include "absl/container/inlined_vector.h" #include "absl/strings/match.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/proto/layout.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace dtensor { namespace { using ::testing::ContainsRegex; using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::SizeIs; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; class ProtoStringMatcher { public: explicit ProtoStringMatcher(const tensorflow::protobuf::Message& expected) : expected_(expected.SerializeAsString()) {} template <typename Message> bool MatchAndExplain(const Message& p, ::testing::MatchResultListener*) const { return p.SerializeAsString() == expected_; } void DescribeTo(::std::ostream* os) const { *os << expected_; } void DescribeNegationTo(::std::ostream* os) const { *os << "not equal to expected message: " << expected_; } private: const std::string expected_; }; inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto( const tensorflow::protobuf::Message& x) { return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x)); } class LayoutTest : public ::testing::Test { protected: Layout BatchLayout() { return Layout::FromString("sharding_specs:x,batch, mesh:|x=4,batch=8|*TPU") .value(); } }; TEST(MeshTest, FromStringEmptyMesh) { Mesh mesh = Mesh::Empty(); std::string mesh_str = mesh.ToString(); EXPECT_EQ(mesh_str, Mesh::kEmptyMeshString); } TEST(MeshTest, FromStringMeshWithGlobalDevices) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=2|0,1|0|/job:localhost/task:0/device:CPU:0|/job:localhost/" 
"task:0/device:CPU:0,/job:localhost/task:0/device:CPU:1"); EXPECT_THAT(mesh->global_devices(), ElementsAre("/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1")); } TEST(MeshTest, FromStringMeshWithXLASPMDAndGlobalDevices) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=2|0,1|0|/job:localhost/task:0/device:CPU:0|/job:localhost/" "task:0/device:CPU:1|use_xla_spmd"); EXPECT_TRUE(mesh->use_xla_spmd()); } TEST(MeshTest, FromStringMeshWithXLASPMD) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0|use_xla_spmd"); EXPECT_TRUE(mesh->use_xla_spmd()); } TEST(MeshTest, FromStringMeshWithoutXLASPMD) { StatusOr<Mesh> mesh = Mesh::FromString("mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"); EXPECT_FALSE(mesh->use_xla_spmd()); } TEST(MeshTest, ToStringMeshWithoutXLASPMD) { Mesh mesh = Mesh::CreateMesh("MyMesh", {"x"}, {2}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, false); EXPECT_TRUE(!absl::StrContains(mesh.ToString(), Mesh::kUseXLASPMDString)); } TEST(MeshTest, ToStringMeshWithXLASPMD) { Mesh mesh = Mesh::CreateMesh("MyMesh", {"x"}, {2}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, true); EXPECT_THAT(mesh.ToString(), ContainsRegex(Mesh::kUseXLASPMDString)); } TEST(MeshTest, FromStringInvalidSingleDeviceMesh) { EXPECT_THAT(Mesh::FromString("/job:localhost/device:CPU:0"), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST(MeshTest, FromStringSingleDeviceMesh) { TF_ASSERT_OK_AND_ASSIGN( Mesh mesh, Mesh::FromString("/job:localhost/task:0/device:CPU:0")); EXPECT_EQ(mesh.ToString(), "/job:localhost/task:0/device:CPU:0"); } TEST_F(LayoutTest, FromStringEmptyLayout) { Layout layout = Layout::Empty(); std::string layout_str = layout.ToString(); 
TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, LayoutToFromString) { Layout layout = BatchLayout(); std::string layout_str = layout.ToString(); TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, LayoutToFromStringNotSharded) { std::string layout_str = "sharding_specs:x," + string(Layout::kUnshardedDim) + ", mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, LayoutToFromStringAny) { std::string layout_str = "sharding_specs:any, mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, LayoutToFromStringSingleDevice) { std::string layout_str = "maximal:true, mesh:/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, AutoGenerateLayout) { std::string layout_str = "sharding_specs:x, mesh:|x=2,y=2|*CPU"; std::string exp_layout_str = "sharding_specs:x, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/" "job:localhost/task:0/device:CPU:3"; EXPECT_EQ(exp_layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, MeshToFromString) { Mesh mesh = BatchLayout().mesh(); std::string mesh_str = mesh.ToString(); TF_ASSERT_OK_AND_ASSIGN(Mesh mesh_from_str, Mesh::FromString(mesh_str)); TF_ASSERT_OK_AND_ASSIGN(MeshProto mesh_from_str_proto, mesh_from_str.ToProto()); EXPECT_THAT(mesh.ToProto(), 
IsOkAndHolds(EqualsProto(mesh_from_str_proto))); } TEST_F(LayoutTest, GetType) { Mesh mesh = BatchLayout().mesh(); EXPECT_TRUE(mesh.is_tpu_mesh()); } TEST_F(LayoutTest, OnTPUMesh) { Layout layout = BatchLayout(); EXPECT_TRUE(layout.mesh().is_tpu_mesh()); } TEST_F(LayoutTest, NumShardsAsVector) { std::vector<int32> shards = {4, 8}; EXPECT_EQ(BatchLayout().num_shards(), shards); } TEST_F(LayoutTest, IsReplicated) { EXPECT_FALSE(BatchLayout().IsFullyReplicated()); } TEST_F(LayoutTest, MeshDeviceLocations) { Layout layout = BatchLayout(); absl::InlinedVector<int64, 4> offset = {1, 2}; EXPECT_THAT(layout.mesh().device_location(10), IsOkAndHolds(offset)); offset = {2, 2}; EXPECT_THAT(layout.mesh().device_location(18), IsOkAndHolds(offset)); offset = {3, 7}; EXPECT_THAT(layout.mesh().device_location(31), IsOkAndHolds(offset)); EXPECT_FALSE(layout.mesh().device_location(32).ok()); EXPECT_FALSE(layout.mesh().device_location(-1).ok()); } TEST_F(LayoutTest, ScalarLayout) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:scalar, mesh:|x=4,y=4|*TPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_TRUE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); EXPECT_EQ(layout.rank(), 0); } TEST_F(LayoutTest, ParseSimpleTpuMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x, mesh:|x=4,y=4|*TPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_TRUE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, ParseSimpleCpuMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,unsharded, mesh:|x=4,y=4|*CPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_FALSE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); 
EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, ParseFailsOnRepeatedShardingSpec) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,x, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnInvalidScalarShardingSpec) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,scalar, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnShardingSpecOverNonExistentMeshDim) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,z, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnBadDeviceString) { auto layout = Layout::FromString("sharding_specs:x,unsharded, d:TPU mesh:x=4,y=4"); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, ParseReplicatedLayout) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "sharding_specs:unsharded,unsharded, mesh:|x=4,y=4|*CPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_FALSE(layout.mesh().is_tpu_mesh()); EXPECT_TRUE(layout.IsFullyReplicated()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, SingleHostFullyReplicatedReducedMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout replicated_layout, Layout::FromString( "sharding_specs:unsharded,unsharded, mesh:|x=2,y=2|*CPU")); Mesh reduced_mesh = replicated_layout.ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 1); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, SingleHostFullShardedReducedMesh) { Layout layout = BatchLayout(); Mesh original_mesh = layout.mesh(); Mesh reduced_mesh = layout.ReducedMesh(); EXPECT_EQ(original_mesh.ToString(), reduced_mesh.ToString()); EXPECT_EQ(reduced_mesh.size(), 32); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, MultiHostReplicatedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( 
"sharding_specs:unsharded,unsharded, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 1); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0)); EXPECT_THAT(reduced_mesh.local_device_ids(), IsEmpty()); EXPECT_THAT(reduced_mesh.local_devices(), IsEmpty()); EXPECT_THAT(reduced_mesh.hosts(), IsEmpty()); } TEST_F(LayoutTest, MultiHostPartiallyShardedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( "sharding_specs:x,unsharded, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 4); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0, 2, 4, 6)); EXPECT_THAT(reduced_mesh.local_device_ids(), ElementsAre(4, 6)); EXPECT_THAT(reduced_mesh.local_devices(), ElementsAre("/job:localhost/task:1/device:CPU:0", "/job:localhost/task:1/device:CPU:2")); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, MultiHostFullyShardedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 8); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); EXPECT_THAT(reduced_mesh.local_device_ids(), ElementsAre(4, 5, 6, 7)); EXPECT_THAT(reduced_mesh.local_devices(), ElementsAre("/job:localhost/task:1/device:CPU:0", "/job:localhost/task:1/device:CPU:1", "/job:localhost/task:1/device:CPU:2", "/job:localhost/task:1/device:CPU:3")); 
EXPECT_EQ(reduced_mesh.hosts().size(), 1); } TEST_F(LayoutTest, FlippedShardedMultiHostMeshes) { StatusOr<Layout> multi_host_layout_1 = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); StatusOr<Layout> multi_host_layout_2 = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|6,7,4,5|" "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3," "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1"); Mesh reduced_mesh_1 = multi_host_layout_1->ReducedMesh(); Mesh reduced_mesh_2 = multi_host_layout_2->ReducedMesh(); EXPECT_FALSE(reduced_mesh_1 == reduced_mesh_2); } TEST_F(LayoutTest, ShardEqualityOneDim) { ShardVector shard_vec1; Shard shard1{1}; shard_vec1.shards.push_back(shard1); shard_vec1.num_shards_per_dim.push_back(1); ShardVector shard_vec2; Shard shard2{2}; Shard shard3{3}; shard_vec2.shards.push_back(shard1); shard_vec2.shards.push_back(shard2); shard_vec2.shards.push_back(shard3); shard_vec2.num_shards_per_dim.push_back(3); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, ShardEqualityOneDimOffset) { ShardVector shard_vec1; Shard shard1{3}; shard_vec1.shards.push_back(shard1); shard_vec1.num_shards_per_dim.push_back(3); ShardVector shard_vec2; Shard shard2{7}; Shard shard3{8}; Shard shard4{9}; shard_vec2.shards.push_back(shard2); shard_vec2.shards.push_back(shard3); shard_vec2.shards.push_back(shard4); shard_vec2.num_shards_per_dim.push_back(9); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, ShardEqualityTwoDims) { auto GenFullVector = [](std::vector<int> num_shards_per_dim) -> ShardVector { ShardVector shard_vec; shard_vec.num_shards_per_dim = num_shards_per_dim; for (int i = 1; i <= num_shards_per_dim[0]; ++i) for (int j = 1; j <= num_shards_per_dim[1]; ++j) { Shard shard{i, j}; 
shard_vec.shards.push_back(shard); } return shard_vec; }; std::vector<int> num_shards_per_dim_1{2, 4}; ShardVector shard_vec1 = GenFullVector(num_shards_per_dim_1); std::vector<int> num_shards_per_dim_2{3, 3}; ShardVector shard_vec2 = GenFullVector(num_shards_per_dim_2); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, Shards) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=3|*CPU")); ShardVector shard_vec = layout.GetShardVector(); std::string expected_shard_vec_str = "shards:[(1,1),(1,2),(1,3),(2,1),(2,2),(2,3)] num_shards_per_dim:(2,3)"; EXPECT_EQ(shard_vec.ToString(), expected_shard_vec_str); } TEST_F(LayoutTest, ShardsInverted) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=3|*CPU")); ShardVector shards = layout.GetShardVector(); std::string expected_shards = "shards:[(1,1),(2,1),(3,1),(1,2),(2,2),(3,2)] num_shards_per_dim:(3,2)"; EXPECT_EQ(shards.ToString(), expected_shards); } TEST_F(LayoutTest, HostShardMap) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,y, mesh:TPU|x=2,y=2|*TPU")); std::string host_name = layout.mesh().hosts()[0]; auto host_map = layout.HostShardMap(); std::string expected_shards = "shards:[(1,1),(1,2),(2,1),(2,2)] num_shards_per_dim:(2,2)"; EXPECT_EQ(host_map.find(host_name)->second.ToString(), expected_shards); } TEST_F(LayoutTest, MultiHostMultiDeviceShards) { std::string host1 = "/job:localhost/task:0"; std::string host2 = "/job:localhost/task:1"; std::string device1 = "/device:TPU:0"; std::string device2 = "/device:TPU:1"; TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString( "sharding_specs:x,unsharded, mesh:TPU|x=4,y=1|0,1,2,3|0,1,2,3|" + host1 + device1 + "," + host1 + device2 + "," + host2 + device1 + "," + host2 + device2)); std::string expected_shard_vec = "shards:[(1,1),(2,1),(3,1),(4,1)] num_shards_per_dim:(4,1)"; EXPECT_EQ(layout.GetShardVector().ToString(), expected_shard_vec); 
std::map<std::string, ShardVector> host_shard_map = layout.HostShardMap(); std::string expected_shards_host1 = "shards:[(1,1),(2,1)] num_shards_per_dim:(4,1)"; ShardVector host1_shard_vec = host_shard_map.find(host1)->second; EXPECT_EQ(host1_shard_vec.ToString(), expected_shards_host1); std::string expected_shards_host2 = "shards:[(3,1),(4,1)] num_shards_per_dim:(4,1)"; ShardVector host2_shard_vec = host_shard_map.find(host2)->second; EXPECT_EQ(host2_shard_vec.ToString(), expected_shards_host2); } TEST_F(LayoutTest, MultiHostCommXYSharded) { std::string host_0 = "/job:localhost/task:0/"; std::string host_1 = "/job:localhost/task:1/"; StatusOr<Layout> send_layout = Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + host_0 + "device:CPU:0," + host_0 + "device:CPU:1," + host_1 + "device:CPU:0," + host_1 + "device:CPU:1"); StatusOr<Layout> recv_layout = Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + host_0 + "device:TPU:0," + host_0 + "device:TPU:1," + host_1 + "device:TPU:0," + host_1 + "device:TPU:1"); std::vector<std::string> send_hosts = send_layout->ReducedMesh().hosts(); std::vector<std::string> recv_hosts = recv_layout->ReducedMesh().hosts(); EXPECT_TRUE(send_hosts == recv_hosts); } TEST_F(LayoutTest, MultiHostCommXSharded) { std::vector<std::string> hosts{"/job:localhost/task:0", "/job:localhost/task:1"}; StatusOr<Layout> send_layout = Layout::FromString( "sharding_specs:x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + hosts[0] + "/device:CPU:0," + hosts[0] + "/device:CPU:1," + hosts[1] + "/device:CPU:0," + hosts[1] + "/device:CPU:1"); StatusOr<Layout> recv_layout = Layout::FromString( "sharding_specs:x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + hosts[0] + "/device:TPU:0," + hosts[0] + "/device:TPU:1," + hosts[1] + "/device:TPU:0," + hosts[1] + "/device:TPU:1"); std::vector<std::string> send_hosts = send_layout->ReducedMesh().hosts(); std::vector<std::string> recv_hosts = recv_layout->ReducedMesh().hosts(); EXPECT_TRUE(send_hosts 
== recv_hosts); std::map<std::string, ShardVector> send_host_shard_map = send_layout->HostShardMap(); std::map<std::string, ShardVector> recv_host_shard_map = recv_layout->HostShardMap(); for (const std::string& host : hosts) { ShardVector shard_vec_in_send_host = send_host_shard_map.find(host)->second; ShardVector shard_vec_in_recv_host = recv_host_shard_map.find(host)->second; EXPECT_EQ(shard_vec_in_send_host, shard_vec_in_recv_host); } } TEST_F(LayoutTest, Transposed2DLayout) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=2|*CPU")); EXPECT_THAT(Layout::Transposed2D(layout), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, Transposed2DLayoutWithBatch) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "sharding_specs:b1,b2,x,y, mesh:|x=2,y=2,b1=2,b2=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString( "sharding_specs:b1,b2,y,x, mesh:|x=2,y=2,b1=2,b2=2|*CPU")); EXPECT_THAT(Layout::Transposed2D(layout), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, MeshDimensionIndex) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|*CPU")); EXPECT_THAT(layout.mesh().idx_for_dim("x"), IsOkAndHolds(0)); EXPECT_THAT(layout.mesh().idx_for_dim("y"), IsOkAndHolds(1)); } TEST_F(LayoutTest, TruncateBeginning) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); EXPECT_EQ(layout.Truncate(1), expected_layout); } TEST_F(LayoutTest, TruncateEnd) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:y, mesh:CPU|x=2,y=2|*CPU")); EXPECT_EQ(layout.Truncate(1, 
true), expected_layout); } TEST_F(LayoutTest, Concatenate) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); EXPECT_THAT(ConcatenateLayouts(layout_1, layout_2), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, ConcatenateDifferentMesh) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:y, mesh:CPU|y=2|*CPU")); auto layout = ConcatenateLayouts(layout_1, layout_2); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, ConcatenateSameDimension) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); auto layout = ConcatenateLayouts(layout_1, layout_2); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, EmptyMeshDeviceType) { auto mesh = Mesh::Empty(); EXPECT_EQ(mesh.device_type(), std::string()); } TEST_F(LayoutTest, ConvertMeshDeviceType) { TF_ASSERT_OK_AND_ASSIGN(Mesh mesh, Mesh::FromString("mesh_name|x=2,batch=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN(Mesh cpu_mesh, mesh.ToDeviceType("CPU")); EXPECT_TRUE(cpu_mesh.is_cpu_mesh()); std::string expected_task_name = "/job:localhost/replica:0/task:0/"; TF_ASSERT_OK_AND_ASSIGN( Mesh expected_mesh, Mesh::FromString("|x=2,batch=1|0,1|0,1|" + expected_task_name + "device:CPU:0," + expected_task_name + "device:CPU:1")); EXPECT_EQ(cpu_mesh, expected_mesh); } TEST_F(LayoutTest, EquivalentLayout) { TF_ASSERT_OK_AND_ASSIGN( Layout fully_sharded, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_sharded, 
Layout::FromString("sharding_specs:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout y_sharded, Layout::FromString("sharding_specs:unsharded,y, mesh:|x=2,y=1|*TPU")); EXPECT_TRUE(fully_sharded.IsEquivalent(x_sharded)); EXPECT_TRUE(x_sharded.IsEquivalent(fully_sharded)); EXPECT_FALSE(fully_sharded.IsEquivalent(y_sharded)); EXPECT_FALSE(y_sharded.IsEquivalent(fully_sharded)); } TEST_F(LayoutTest, GetSingleDeviceMeshEmptyDeviceString) { EXPECT_THAT(Mesh::GetSingleDeviceMesh(""), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST_F(LayoutTest, GetSingleDeviceMeshSuccess) { TF_ASSERT_OK_AND_ASSIGN( auto mesh, Mesh::FromString("/job:localhost/task:1/device:CPU:0")); EXPECT_THAT(Mesh::GetSingleDeviceMesh("/job:localhost/task:1/device:CPU:0"), IsOkAndHolds(mesh)); } TEST_F(LayoutTest, GetSingleDeviceLayoutInvalidMesh) { auto mesh = Mesh::Empty(); EXPECT_THAT(Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST_F(LayoutTest, GetSingleDeviceLayoutSuccess) { TF_ASSERT_OK_AND_ASSIGN( auto mesh, Mesh::FromString("/job:localhost/task:1/device:CPU:0")); TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "maximal:true, mesh:/job:localhost/task:1/device:CPU:0")); EXPECT_THAT(Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh), IsOkAndHolds(layout)); } TEST(DynamicSizeTest, IsDynamicSize) { EXPECT_TRUE(IsDynamicSize(-1)); EXPECT_TRUE(IsDynamicSize(mlir::ShapedType::kDynamic)); EXPECT_FALSE(IsDynamicSize(10)); } TEST_F(LayoutTest, LayoutType) { TF_ASSERT_OK_AND_ASSIGN( auto maximal, Layout::FromString( "maximal:true, mesh:/job:localhost/task:1/device:CPU:0")); EXPECT_EQ(maximal.type(), Layout::LayoutType::kSingleDevice); TF_ASSERT_OK_AND_ASSIGN(auto parted, Layout::FromString("parted:x, mesh:|x=2|*TPU")); EXPECT_EQ(parted.type(), Layout::LayoutType::kParted); TF_ASSERT_OK_AND_ASSIGN( auto static_layout, Layout::FromString("sharding_specs:x, mesh:|x=2|*TPU")); EXPECT_EQ(static_layout.type(), 
Layout::LayoutType::kStatic); } TEST_F(LayoutTest, PartedLayoutToFromString) { TF_ASSERT_OK_AND_ASSIGN(Layout layout, BatchLayout().ToParted()); std::string layout_str = layout.ToString(); TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, RaggedLayoutEqual) { TF_ASSERT_OK_AND_ASSIGN( Layout fully_sharded, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_sharded, Layout::FromString("sharding_specs:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_parted, Layout::FromString("parted:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN(Layout x_y_parted, Layout::FromString("parted:x,y, mesh:|x=2,y=1|*TPU")); EXPECT_TRUE(x_parted.IsEquivalent(x_y_parted)); EXPECT_TRUE(x_y_parted.IsEquivalent(x_parted)); EXPECT_FALSE(x_sharded.IsEquivalent(x_parted)); EXPECT_FALSE(fully_sharded.IsEquivalent(x_y_parted)); EXPECT_FALSE(x_sharded == x_parted); EXPECT_FALSE(fully_sharded == x_y_parted); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/tensor_layout.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/tensor_layout_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6bc26f15-8535-45cf-abc2-4451d8d04c30
cpp
tensorflow/tensorflow
slice_util
tensorflow/dtensor/cc/slice_util.cc
tensorflow/dtensor/tests/slice_util_test.cc
#include "tensorflow/dtensor/cc/slice_util.h" #include <optional> #include <string> #include <vector> #include "mlir/IR/BuiltinTypes.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace slice_util { namespace { StatusOr<int64_t> GetEllipsisSize(int64_t input_rank, const std::vector<Token>& tokens, int64_t* output_rank) { bool found = false; int64_t regular_axis = 0; int64_t new_axis = 0; int64_t shrink_axis = 0; for (const auto& token : tokens) { switch (token.token_type) { case Token::ELLIPSIS: if (found) { return absl::InvalidArgumentError( "More than one ellipsis was found."); } found = true; break; case Token::NEW_AXIS: ++new_axis; break; case Token::SHRINK_AXIS: ++shrink_axis; break; case Token::REGULAR: ++regular_axis; break; } } int64_t ellipsis_size = input_rank - (regular_axis + shrink_axis); if (found && ellipsis_size < 0) { return absl::InvalidArgumentError(absl::StrCat( "Ellipsis was found, but there is no remaining axis for it.", " input_rank=", input_rank, " regular_axis=", regular_axis, " shrink_axis=", shrink_axis)); } *output_rank = regular_axis + ellipsis_size + new_axis; return ellipsis_size; } } Token Token::normalize(int64_t dim_size) const { if (dynamic_mask) { return *this; } int64_t new_begin = begin; int dir = (stride > 0) ? 
1 : -1; if (begin_mask) { if (dir > 0) { new_begin = 0; } else { new_begin = dim_size - 1; } } int64_t new_end = end; if (end_mask) { if (dir > 0) { new_end = dim_size; } else { new_end = -1; } } int64_t shift = (new_begin - new_begin % dim_size); new_begin -= shift; new_end -= shift; int64_t n = dir * (new_end - new_begin + stride - dir) / (dir * stride); if (n < 0) { new_end = new_end + dir * dim_size; } n = dir * (new_end - new_begin + stride - dir) / (dir * stride); new_end = new_begin + n * stride; Token r = *this; r.begin = new_begin; r.end = new_end; return r; } std::optional<Token> Token::GetLocalToken(int64_t dim_size, int64_t num_shards) const { Token token = normalize(dim_size); VLOG(5) << "Compute: " << "dim_size=" << dim_size << " num_shards=" << num_shards << " token.begin=" << token.begin << " token.end=" << token.end << " token.stride=" << token.stride; if (token.begin_mask && token.end_mask) return token; if (token.dynamic_mask) return std::nullopt; if (token.stride < 0) return std::nullopt; int64_t shard_dim_size = dim_size / num_shards; if (shard_dim_size % token.stride == 0) { if (token.begin >= 0 && token.begin < token.stride && token.end >= dim_size && token.end < dim_size + token.stride) { token.end = shard_dim_size + (token.end - dim_size); return token; } } return std::nullopt; } Status TokenProcessor::Run(const std::vector<Token>& tokens) { int64_t input_rank = input_rank_; int64_t output_rank; TF_ASSIGN_OR_RETURN(int64_t ellipsis_size, GetEllipsisSize(input_rank, tokens, &output_rank)); PrepareResults(tokens.size(), input_rank, output_rank); bool out_of_bound = false; int64_t input_index = 0; int64_t output_index = 0; for (const auto& token : tokens) { switch (token.token_type) { case Token::ELLIPSIS: VisitEllipsisAxis(token); out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size, &input_index, &output_index); ellipsis_size = 0; break; case Token::SHRINK_AXIS: VisitShrinkAxis(token, input_index, output_index); ++input_index; 
break; case Token::NEW_AXIS: VisitNewAxis(token, input_index, output_index); ++output_index; break; case Token::REGULAR: if (input_index >= input_rank) { out_of_bound = true; break; } VisitRegularAxis(token, input_index, output_index); ++input_index; ++output_index; break; } if (out_of_bound) { break; } } if (ellipsis_size > 0) { out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size, &input_index, &output_index); } if (out_of_bound) { return absl::InvalidArgumentError( "Reading axis beyond the input tensor's rank. " "The slicing token is incorrect."); } return FinalizeResults(input_rank, output_rank); } bool TokenProcessor::VisitLoop(int64_t input_rank, int64_t output_rank, int64_t ellipsis_size, int64_t* input_index, int64_t* output_index) { for (int64_t k = 0; k < ellipsis_size; ++k) { if (*input_index >= input_rank) { return true; } VisitImplicitAxis(*input_index, *output_index); ++*input_index; ++*output_index; } return false; } } } }
#include "tensorflow/dtensor/cc/slice_util.h" #include <map> #include <memory> #include <ostream> #include <string> #include <vector> #include <gmock/gmock.h> #include "tensorflow/core/platform/test.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/proto/layout.pb.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace dtensor { namespace slice_util { namespace { using ::testing::SizeIs; using ::tsl::testing::IsOk; TEST(TokenTest, NormalizeDynamic) { auto spec = Token(Token::REGULAR, 0, 0, 1, true, true, true); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 0); EXPECT_EQ(spec.normalize(4).dynamic_mask, true); EXPECT_EQ(spec.normalize(4).begin_mask, true); EXPECT_EQ(spec.normalize(4).end_mask, true); } TEST(TokenTest, NormalizeFullPositiveStride) { auto spec = Token(Token::REGULAR, 0, 4, 1); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 4); spec = Token(Token::REGULAR, 0, 4, 2); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 4); spec = Token(Token::REGULAR, 0, 4, 3); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 6); spec = Token(Token::REGULAR, 0, 4, 5); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 5); } TEST(TokenTest, NormalizeFullNegativeStride) { auto spec = Token(Token::REGULAR, 3, -1, -1); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -1); spec = Token(Token::REGULAR, 3, -1, -2); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -1); spec = Token(Token::REGULAR, 3, -1, -3); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -3); spec = Token(Token::REGULAR, 3, -1, -5); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -2); } TEST(TokenTest, NormalizeZeroPositiveStride) { auto spec = Token(Token::REGULAR, 3, 3, 1); EXPECT_EQ(spec.normalize(7).begin, 3); EXPECT_EQ(spec.normalize(7).end, 3); 
spec = Token(Token::REGULAR, 0, 0, 1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 0); } TEST(TokenTest, NormalizeZeroNegativeStride) { auto spec = Token(Token::REGULAR, 3, 3, -1); EXPECT_EQ(spec.normalize(7).begin, 3); EXPECT_EQ(spec.normalize(7).end, 3); spec = Token(Token::REGULAR, 0, 0, -1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 0); } TEST(TokenTest, NormalizePartialPositiveStride) { auto spec = Token(Token::REGULAR, 1, 5, 1); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 5); spec = Token(Token::REGULAR, 1, 5, 2); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 5); spec = Token(Token::REGULAR, 1, 5, 3); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 7); spec = Token(Token::REGULAR, 1, 5, 5); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 6); spec = Token(Token::REGULAR, 1, -1, 1); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 6); spec = Token(Token::REGULAR, 0, -1, 1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 6); } TEST(TokenTest, NormalizePartialNegativeStride) { auto spec = Token(Token::REGULAR, 6, 2, -1); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 2); spec = Token(Token::REGULAR, 6, 2, -2); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 2); spec = Token(Token::REGULAR, 6, 2, -3); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 0); spec = Token(Token::REGULAR, 6, 2, -5); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 1); } TEST(TokenTest, NormalizeFarFromCenter) { auto spec = Token(Token::REGULAR, 100, 102, 1); EXPECT_EQ(spec.normalize(9).begin, 1); EXPECT_EQ(spec.normalize(9).end, 3); } TEST(TokenTest, NormalizeBeginMask) { auto spec = Token(Token::REGULAR, 3, 2, 1); spec.begin_mask = true; EXPECT_EQ(spec.normalize(7).begin, 0); spec = 
Token(Token::REGULAR, 3, 2, -1); spec.begin_mask = true; EXPECT_EQ(spec.normalize(7).begin, 6); } TEST(TokenTest, NormalizeEndMask) { auto spec = Token(Token::REGULAR, 3, 2, 1); spec.end_mask = true; EXPECT_EQ(spec.normalize(7).end, 7); spec = Token(Token::REGULAR, 3, 2, -1); spec.end_mask = true; EXPECT_EQ(spec.normalize(7).end, -1); } class InferenceTest : public ::testing::Test { protected: Mesh GetMesh() { return Mesh::CreateMesh("MyMesh", {"x", "y"}, {2, 1}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, false); } }; TEST_F(InferenceTest, FullyReplicatedInputs) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::REGULAR, 0, 2, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ( backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, 
Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, NewAxisMask) { const Layout input_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim, "x", "y"}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(4)); EXPECT_EQ(forward->local_tokens()[0].end, 0); EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 1); EXPECT_EQ(forward->local_tokens()[3].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>( {Layout::kUnshardedDim, Layout::kUnshardedDim, "x", "y"})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), SizeIs(4)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 1); EXPECT_EQ(backward->local_tokens()[3].end, 4); } TEST_F(InferenceTest, ShrinkAxisMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const 
Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::SHRINK_AXIS, 0, 2, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, EllipsisMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", "y", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{"x", "y", Layout::kUnshardedDim, Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::ELLIPSIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4, 6}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y", Layout::kUnshardedDim})); 
EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(3)); EXPECT_EQ(forward->local_tokens()[0].end, 0); EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 0); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4, 6}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y", Layout::kUnshardedDim, Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), SizeIs(3)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 0); } TEST_F(InferenceTest, EllipsisNewAxisEndMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::ELLIPSIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::REGULAR, 0, 0, 1, false, true, true), }; auto forward = CreateAndRun<ForwardLayoutInference>(specs, input_layout, std::vector<int64_t>{2}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(3)); EXPECT_EQ(forward->local_tokens()[0].end, 0); EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), 
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(3)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 2); } TEST_F(InferenceTest, AdditionalAxes) { const Layout input_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 0, 1, false, true, true)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(1)); EXPECT_EQ(forward->local_tokens()[0].begin_mask, true); EXPECT_EQ(forward->local_tokens()[0].end_mask, true); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), SizeIs(1)); EXPECT_EQ(forward->local_tokens()[0].begin_mask, true); EXPECT_EQ(forward->local_tokens()[0].end_mask, true); } TEST_F(InferenceTest, ShardingOnNonSlicedDimension) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, 
false), Token(Token::REGULAR, 0, 2, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", Layout::kUnshardedDim})); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout1) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); 
ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout2) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "y"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 4); } TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout3) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, 
GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 3, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNeedRelayout) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::REGULAR, 0, 4, 3, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); 
EXPECT_EQ(forward->local_tokens()[1].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 4); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/slice_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/slice_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5dcf037c-7363-4ecd-9b42-7d5c93368df1
cpp
tensorflow/tensorflow
layout_to_xla_sharding
tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.cc
tensorflow/dtensor/tests/layout_to_xla_sharding_test.cc
#include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h" #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace { void PopulateDevices(absl::Span<const int64_t> permutation, absl::Span<const int64_t> sizes, absl::Span<const int64_t> cum_sizes, std::vector<int64_t>* out_devices, int64_t base = 0) { int expanding_dim = permutation[0]; int expanding_dim_size = sizes[expanding_dim]; int expanding_cum_dim_size = cum_sizes[expanding_dim]; for (int i = 0; i < expanding_dim_size; ++i) { if (permutation.size() == 1) { out_devices->push_back(base + i * expanding_cum_dim_size); } else { PopulateDevices(permutation.subspan(1), sizes, cum_sizes, out_devices, base + i * expanding_cum_dim_size); } } } } std::vector<int64_t> MeshMajorToMinor::ToDeviceList() { std::vector<int64_t> cum_sizes(sizes.size()); int64_t cum_size = 1; for (int i = sizes.size() - 1; i >= 0; --i) { cum_sizes[i] = cum_size; cum_size *= sizes[i]; } std::vector<int64_t> devices; devices.reserve(cum_size * sizes[0]); PopulateDevices(permutation, sizes, cum_sizes, &devices); return devices; } StatusOr<MeshMajorToMinor> ConvertMeshMajorToMinor(const Layout& layout, const Mesh& mesh) { MeshMajorToMinor major_to_minor; major_to_minor.permutation.reserve(mesh.dims().size()); major_to_minor.sizes.reserve(mesh.dims().size()); absl::flat_hash_map<std::string, int64_t> dim_name_to_index_map; for (const auto& [index, mesh_dim] : llvm::enumerate(mesh.dims())) { major_to_minor.sizes.push_back(mesh_dim.size); dim_name_to_index_map[mesh_dim.name] = index; } for (const auto& spec : layout.sharding_spec_strs()) { if (mesh.IsMeshDim(spec)) { const auto it = dim_name_to_index_map.find(spec); 
TF_RET_CHECK(it != dim_name_to_index_map.end()); const auto& dimension_index = it->second; major_to_minor.permutation.push_back(dimension_index); dim_name_to_index_map.erase(it); } } for (const auto& [name, unused_size] : mesh.dims()) { if (const auto it = dim_name_to_index_map.find(name); it != dim_name_to_index_map.end()) { const auto& dimension_index = it->second; major_to_minor.permutation.push_back(dimension_index); } } TF_RET_CHECK(major_to_minor.permutation.size() == major_to_minor.sizes.size()); return major_to_minor; } StatusOr<::xla::OpSharding> ConvertLayoutToXlaOpSharding(const Layout& layout) { ::xla::OpSharding xla_sharding; if (layout.IsSingleDevice()) { xla_sharding.set_type(::xla::OpSharding::MAXIMAL); return xla_sharding; } else if (layout.IsFullyReplicated()) { xla_sharding.set_type(::xla::OpSharding::REPLICATED); return xla_sharding; } xla_sharding.set_type(::xla::OpSharding::OTHER); const Mesh& mesh = layout.mesh(); { int32 product_of_sharded_dimensions = 1; for (int32 dim_size : layout.num_shards()) { product_of_sharded_dimensions *= dim_size; xla_sharding.add_tile_assignment_dimensions(dim_size); } if (product_of_sharded_dimensions != mesh.num_devices()) { xla_sharding.add_tile_assignment_dimensions( mesh.num_devices() / product_of_sharded_dimensions); xla_sharding.set_replicate_on_last_tile_dim(true); } } TF_ASSIGN_OR_RETURN(auto major_to_minor, ConvertMeshMajorToMinor(layout, mesh)); std::vector<int64_t> tile_assignment_devices = major_to_minor.ToDeviceList(); *(xla_sharding.mutable_tile_assignment_devices()) = { tile_assignment_devices.begin(), tile_assignment_devices.end()}; return xla_sharding; } } }
// Tests for ConvertLayoutToXlaOpSharding: each test builds a DTensor Layout
// from its string form, converts it to an xla::OpSharding proto, round-trips
// that through xla::HloSharding, and compares the canonical sharding string.
#include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h"

#include <string>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "benchmark/benchmark.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tsl/platform/statusor.h"

namespace tensorflow {
namespace dtensor {
namespace {

// Helper: layout string -> Layout -> OpSharding -> HloSharding -> string.
// Any failure in the chain surfaces as a non-OK status to the caller.
StatusOr<std::string> ConvertLayoutStrToHloShardingStr(std::string layout_str) {
  TF_ASSIGN_OR_RETURN(const Layout layout, Layout::FromString(layout_str));
  TF_ASSIGN_OR_RETURN(const xla::OpSharding op_sharding,
                      ConvertLayoutToXlaOpSharding(layout));
  TF_ASSIGN_OR_RETURN(const auto hlo_sharding,
                      xla::HloSharding::FromProto(op_sharding));
  return hlo_sharding.ToString();
}

// Fully replicated layouts must map to {replicated} regardless of mesh rank.
TEST(LayoutToXLAShardingTest, ReplicatedLayout1D) {
  std::string layout_str =
      "sharding_specs:unsharded, "
      "mesh:|x=2|0,1|0,1|/job:localhost/task:0/device:CPU:0,/job:localhost/"
      "task:0/device:CPU:1";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{replicated}", sharding);
}

TEST(LayoutToXLAShardingTest, ReplicatedLayout2D) {
  std::string layout_str =
      "sharding_specs:unsharded,unsharded "
      "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{replicated}", sharding);
}

TEST(LayoutToXLAShardingTest, ReplicatedLayout3D) {
  std::string layout_str =
      "sharding_specs:unsharded,unsharded,unsharded, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{replicated}", sharding);
}

// Sharding over every mesh dimension, identity order: devices stay in
// declaration order.
TEST(LayoutToXLAShardingTest, FullyShardedLayout1D) {
  std::string layout_str =
      "sharding_specs:x, "
      "mesh:|x=3|0,1,2|0,1,2|/job:localhost/task:0/device:CPU:0,/job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[3]0,1,2}", sharding);
}

TEST(LayoutToXLAShardingTest, FullyShardedLayout2D) {
  std::string layout_str =
      "sharding_specs:x,y, "
      "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2]0,1,2,3}", sharding);
}

// Non-square mesh with permuted sharding specs: the tile shape follows the
// spec order (y=4 first), and the device list is permuted accordingly.
TEST(LayoutToXLAShardingTest, FullyShardedLayout2DAsymmetricMesh) {
  std::string layout_str =
      "sharding_specs:y,x, "
      "mesh:|x=2,y=4|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/job:localhost/task:0/device:CPU:1,/job:localhost/task:0/"
      "device:CPU:2,/job:localhost/task:0/device:CPU:3,/job:localhost/task:0/"
      "device:CPU:4,/job:localhost/task:0/device:CPU:5,/job:localhost/task:0/"
      "device:CPU:6,/job:localhost/task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[4,2]0,4,1,5,2,6,3,7}", sharding);
}

TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout2D) {
  std::string layout_str =
      "sharding_specs:y,x, "
      "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2]0,2,1,3}", sharding);
}

TEST(LayoutToXLAShardingTest, FullyShardedLayout3D) {
  std::string layout_str =
      "sharding_specs:x,y,z, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2,2]0,1,2,3,4,5,6,7}", sharding);
}

TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_1) {
  std::string layout_str =
      "sharding_specs:z,x,y, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2,2]0,2,4,6,1,3,5,7}", sharding);
}

TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_2) {
  std::string layout_str =
      "sharding_specs:z,y,x, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2,2]0,4,2,6,1,5,3,7}", sharding);
}

// Partially sharded layouts: the unsharded remainder of the mesh appears as a
// trailing tile dimension with last_tile_dim_replicate.
TEST(LayoutToXLAShardingTest, PartiallyShardedLayout2D) {
  std::string layout_str =
      "sharding_specs:x,unsharded, "
      "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}", sharding);
}

TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout2D) {
  std::string layout_str =
      "sharding_specs:y,unsharded, "
      "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}", sharding);
}

TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_1) {
  std::string layout_str =
      "sharding_specs:x,y,unsharded, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}",
            sharding);
}

TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_2) {
  std::string layout_str =
      "sharding_specs:x,unsharded,unsharded, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}",
            sharding);
}

TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_1) {
  std::string layout_str =
      "sharding_specs:z,y,unsharded, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,2,1,2]0,4,2,6,1,5,3,7 last_tile_dim_replicate}",
            sharding);
}

TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_2) {
  std::string layout_str =
      "sharding_specs:y,unsharded,z, "
      "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
      "device:CPU:0,/"
      "job:localhost/"
      "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
      "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
      "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
      "task:0/device:CPU:7";
  TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
                          ConvertLayoutStrToHloShardingStr(layout_str));

  EXPECT_EQ("{devices=[2,1,2,2]0,4,1,5,2,6,3,7 last_tile_dim_replicate}",
            sharding);
}

// Benchmarks conversion on a 65536-device (8x128x64) mesh to track the cost
// of device-list materialization.
void BM_65536Devices(benchmark::State& state) {
  std::vector<int64_t> device_ids(65536);
  absl::c_iota(device_ids, 0);
  std::vector<std::string> devices_str(65536);
  absl::c_generate(devices_str, [n = 0]() mutable {
    return absl::StrCat("/job:localhost/task:0/device:CPU:", n++);
  });
  auto mesh = Mesh::CreateMesh("", {"x", "y", "z"}, {8, 128, 64}, device_ids,
                               {}, device_ids, devices_str);
  TF_ASSERT_OK_AND_ASSIGN(auto layout,
                          Layout::GetLayout({"x", "y", "z"}, mesh));
  for (auto s : state) {
    TF_EXPECT_OK(ConvertLayoutToXlaOpSharding(layout).status());
  }
}
BENCHMARK(BM_65536Devices);

}  // namespace
}  // namespace dtensor
}  // namespace tensorflow
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/layout_to_xla_sharding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ec5e98fc-8ed2-4635-ad58-fbb594789502
cpp
tensorflow/tensorflow
benchmark_model
tensorflow/lite/tools/benchmark/benchmark_model.cc
tensorflow/tools/benchmark/benchmark_model_test.cc
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"

#include <cstdint>
#ifdef __linux__
#include <unistd.h>
#endif

#include <iostream>
#include <memory>
#include <sstream>
#include <string>

#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/logging.h"

namespace tflite {
namespace benchmark {
using tensorflow::Stat;

// Default interval (ms) between two consecutive memory-footprint checks.
constexpr int kMemoryCheckIntervalMs = 50;

#ifdef __linux__
// Reads /proc/self/statm and reports the four leading fields converted from
// pages to MB. All outputs are zeroed first so a failed open leaves them 0.
void GetRssStats(size_t* vsize, size_t* rss, size_t* shared, size_t* code) {
  FILE* fp = fopen("/proc/self/statm", "rt");
  *vsize = 0;
  *rss = 0;
  *shared = 0;
  *code = 0;
  if (fp == nullptr) return;
  // Return value deliberately ignored; zeros remain on a short read.
  (void)!fscanf(fp, "%zu %zu %zu %zu", vsize, rss, shared, code);
  fclose(fp);
  // pages -> bytes (via page size) -> MB (>> 20).
  *vsize = *vsize * getpagesize() >> 20;
  *rss = *rss * getpagesize() >> 20;
  *shared = *shared * getpagesize() >> 20;
  *code = *code * getpagesize() >> 20;
}
#endif

// Declares every benchmark parameter with its type and default value.
BenchmarkParams BenchmarkModel::DefaultParams() {
  BenchmarkParams params;
  params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
  params.AddParam("min_secs", BenchmarkParam::Create<float>(1.0f));
  params.AddParam("max_secs", BenchmarkParam::Create<float>(150.0f));
  params.AddParam("run_delay", BenchmarkParam::Create<float>(-1.0f));
  params.AddParam("run_frequency", BenchmarkParam::Create<float>(-1.0f));
  params.AddParam("num_threads", BenchmarkParam::Create<int32_t>(-1));
  params.AddParam("use_caching", BenchmarkParam::Create<bool>(false));
  params.AddParam("benchmark_name", BenchmarkParam::Create<std::string>(""));
  params.AddParam("output_prefix", BenchmarkParam::Create<std::string>(""));
  params.AddParam("warmup_runs", BenchmarkParam::Create<int32_t>(1));
  params.AddParam("warmup_min_secs", BenchmarkParam::Create<float>(0.5f));
  params.AddParam("verbose", BenchmarkParam::Create<bool>(false));
  params.AddParam("dry_run", BenchmarkParam::Create<bool>(false));
  params.AddParam("report_peak_memory_footprint",
                  BenchmarkParam::Create<bool>(false));
  params.AddParam("memory_footprint_check_interval_ms",
                  BenchmarkParam::Create<int32_t>(kMemoryCheckIntervalMs));
  params.AddParam("gpu_invoke_loop_times", BenchmarkParam::Create<int32_t>(1));
  return params;
}

BenchmarkModel::BenchmarkModel() : params_(DefaultParams()) {}

// Logs a human-readable summary (timings, then memory if supported) when the
// benchmark finishes.
void BenchmarkLoggingListener::OnBenchmarkEnd(const BenchmarkResults& results) {
  auto inference_us = results.inference_time_us();
  auto init_us = results.startup_latency_us();
  auto warmup_us = results.warmup_time_us();
  auto init_mem_usage = results.init_mem_usage();
  auto overall_mem_usage = results.overall_mem_usage();
  TFLITE_LOG(INFO) << "Inference timings in us: "
                   << "Init: " << init_us << ", "
                   << "First inference: " << warmup_us.first() << ", "
                   << "Warmup (avg): " << warmup_us.avg() << ", "
                   << "Inference (avg): " << inference_us.avg();

  if (!init_mem_usage.IsSupported()) return;
  TFLITE_LOG(INFO)
      << "Note: as the benchmark tool itself affects memory footprint, the "
         "following is only APPROXIMATE to the actual memory footprint of the "
         "model at runtime. Take the information at your discretion.";
  TFLITE_LOG(INFO) << "Memory footprint delta from the start of the tool (MB): "
                   << "init=" << init_mem_usage.mem_footprint_kb / 1024.0
                   << " overall="
                   << overall_mem_usage.mem_footprint_kb / 1024.0;

  auto peak_mem_mb = results.peak_mem_mb();
  if (peak_mem_mb > 0) {
    TFLITE_LOG(INFO)
        << "Overall peak memory footprint (MB) via periodic monitoring: "
        << peak_mem_mb;
#ifdef __linux__
    size_t vsize, rss, shared, code;
    GetRssStats(&vsize, &rss, &shared, &code);
    // Typo fixes in the log text: "exeution" -> "execution",
    // "RssAnnon" -> "RssAnon" (the /proc/self/status field is RssAnon).
    TFLITE_LOG(INFO) << "Memory status at the end of execution:";
    TFLITE_LOG(INFO) << "- VmRSS : " << rss << " MB";
    TFLITE_LOG(INFO) << "+ RssAnon : " << rss - shared << " MB";
    TFLITE_LOG(INFO) << "+ RssFile + RssShmem : " << shared << " MB";
#endif
  }
}

// Command-line flag definitions; each flag is backed by the parameter of the
// same name declared in DefaultParams().
std::vector<Flag> BenchmarkModel::GetFlags() {
  return {
      CreateFlag<int32_t>(
          "num_runs", &params_,
          "expected number of runs, see also min_secs, max_secs"),
      CreateFlag<float>(
          "min_secs", &params_,
          "minimum number of seconds to rerun for, potentially making the "
          "actual number of runs to be greater than num_runs"),
      CreateFlag<float>(
          "max_secs", &params_,
          "maximum number of seconds to rerun for, potentially making the "
          "actual number of runs to be less than num_runs. Note if --max-secs "
          "is exceeded in the middle of a run, the benchmark will continue to "
          "the end of the run but will not start the next run."),
      CreateFlag<float>("run_delay", &params_, "delay between runs in seconds"),
      CreateFlag<float>(
          "run_frequency", &params_,
          "Execute at a fixed frequency, instead of a fixed delay."
          "Note if the targeted rate per second cannot be reached, the "
          "benchmark would start the next run immediately, trying its best to "
          "catch up. If set, this will override run_delay."),
      CreateFlag<int32_t>("num_threads", &params_, "number of threads"),
      CreateFlag<bool>(
          "use_caching", &params_,
          "Enable caching of prepacked weights matrices in matrix "
          "multiplication routines. Currently implies the use of the Ruy "
          "library."),
      CreateFlag<std::string>("benchmark_name", &params_, "benchmark name"),
      CreateFlag<std::string>("output_prefix", &params_,
                              "benchmark output prefix"),
      CreateFlag<int32_t>(
          "warmup_runs", &params_,
          "minimum number of runs performed on initialization, to "
          "allow performance characteristics to settle, see also "
          "warmup_min_secs"),
      CreateFlag<float>(
          "warmup_min_secs", &params_,
          "minimum number of seconds to rerun for, potentially making the "
          "actual number of warm-up runs to be greater than warmup_runs"),
      CreateFlag<bool>("verbose", &params_,
                       "Whether to log parameters whose values are not set. "
                       "By default, only log those parameters that are set by "
                       "parsing their values from the commandline flags."),
      CreateFlag<bool>("dry_run", &params_,
                       "Whether to run the tool just with simply loading the "
                       "model, allocating tensors etc. but without actually "
                       "invoking any op kernels."),
      CreateFlag<bool>(
          "report_peak_memory_footprint", &params_,
          "Report the peak memory footprint by periodically checking the "
          "memory footprint. Internally, a separate thread will be spawned for "
          "this periodic check. Therefore, the performance benchmark result "
          "could be affected."),
      CreateFlag<int32_t>("memory_footprint_check_interval_ms", &params_,
                          "The interval in millisecond between two consecutive "
                          "memory footprint checks. This is only used when "
                          "--report_peak_memory_footprint is set to true."),
      CreateFlag<int32_t>(
          "gpu_invoke_loop_times", &params_,
          "Number of GPU delegate invoke loop iterations. If > 0 then reported "
          "latency is divided by this number. Used only when "
          "TFLITE_GPU_ENABLE_INVOKE_LOOP is defined.")};
}

// Logs parameter values; when `verbose` is false, LOG_BENCHMARK_PARAM only
// logs parameters that were explicitly set on the command line.
void BenchmarkModel::LogParams() {
  const bool verbose = params_.Get<bool>("verbose");
  TFLITE_LOG(INFO) << "Log parameter values verbosely: [" << verbose << "]";
  LOG_BENCHMARK_PARAM(int32_t, "num_runs", "Min num runs", verbose);
  LOG_BENCHMARK_PARAM(float, "min_secs", "Min runs duration (seconds)",
                      verbose);
  LOG_BENCHMARK_PARAM(float, "max_secs", "Max runs duration (seconds)",
                      verbose);
  LOG_BENCHMARK_PARAM(float, "run_delay", "Inter-run delay (seconds)", verbose);
  LOG_BENCHMARK_PARAM(float, "run_frequency",
                      "Number of prorated runs per second", verbose);
  LOG_BENCHMARK_PARAM(int32_t, "num_threads", "Num threads", verbose);
  LOG_BENCHMARK_PARAM(bool, "use_caching", "Use caching", verbose);
  LOG_BENCHMARK_PARAM(std::string, "benchmark_name", "Benchmark name", verbose);
  LOG_BENCHMARK_PARAM(std::string, "output_prefix", "Output prefix", verbose);
  LOG_BENCHMARK_PARAM(int32_t, "warmup_runs", "Min warmup runs", verbose);
  LOG_BENCHMARK_PARAM(float, "warmup_min_secs",
                      "Min warmup runs duration (seconds)", verbose);
  LOG_BENCHMARK_PARAM(bool, "dry_run", "Run w/o invoking kernels", verbose);
  LOG_BENCHMARK_PARAM(bool, "report_peak_memory_footprint",
                      "Report the peak memory footprint", verbose);
  LOG_BENCHMARK_PARAM(int32_t, "memory_footprint_check_interval_ms",
                      "Memory footprint check interval (ms)", verbose);
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
  LOG_BENCHMARK_PARAM(int32_t, "gpu_invoke_loop_times",
                      "Number of GPU delegate invoke loop iterations. Latency "
                      "will be divided by it.",
                      verbose);
#endif
}

TfLiteStatus BenchmarkModel::PrepareInputData() { return kTfLiteOk; }

TfLiteStatus BenchmarkModel::ResetInputsAndOutputs() { return kTfLiteOk; }

// Runs the model repeatedly until both `min_num_times` and `min_secs` are
// satisfied, or `max_secs` is exceeded. Reports per-run latency stats; the
// last non-OK RunImpl() status (if any) is surfaced via `invoke_status`.
Stat<int64_t> BenchmarkModel::Run(int min_num_times, float min_secs,
                                  float max_secs, RunType run_type,
                                  TfLiteStatus* invoke_status) {
  Stat<int64_t> run_stats;
  TFLITE_LOG(INFO) << "Running benchmark for at least " << min_num_times
                   << " iterations and at least " << min_secs
                   << " seconds but" << " terminate if exceeding " << max_secs
                   << " seconds.";
  int64_t now_us = profiling::time::NowMicros();
  int64_t min_finish_us = now_us + static_cast<int64_t>(min_secs * 1.e6f);
  int64_t max_finish_us = now_us + static_cast<int64_t>(max_secs * 1.e6f);

  *invoke_status = kTfLiteOk;
  float inter_run_sleep_time = params_.Get<float>("run_delay");
  auto run_frequency = params_.Get<float>("run_frequency");
  double manual_inter_run_gap = 1.0 / run_frequency;
  // next_run_finish_time is only meaningful when run_frequency > 0.
  double next_run_finish_time = now_us * 1e-6 + manual_inter_run_gap;
  for (int run = 0; (run < min_num_times || now_us < min_finish_us) &&
                    now_us <= max_finish_us;
       run++) {
    ResetInputsAndOutputs();
    listeners_.OnSingleRunStart(run_type);
    int64_t start_us = profiling::time::NowMicros();
    TfLiteStatus status = RunImpl();
    int64_t end_us = profiling::time::NowMicros();
    listeners_.OnSingleRunEnd();

    int64_t run_duration_us = end_us - start_us;
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
    // Consistency: read with the same type the parameter was declared with
    // (int32_t), matching every other typed access in this file.
    int32_t gpu_invoke_loop_times = params_.Get<int32_t>("gpu_invoke_loop_times");
    if (gpu_invoke_loop_times > 0) {
      // The delegate looped the invocation internally; report per-iteration
      // latency.
      run_duration_us = static_cast<int64_t>(
          static_cast<double>(run_duration_us) / gpu_invoke_loop_times);
    }
#endif
    run_stats.UpdateStat(run_duration_us);
    if (run_frequency > 0) {
      // Fixed-frequency mode: sleep only for the remainder of this period,
      // catching up immediately if we are already behind.
      inter_run_sleep_time =
          next_run_finish_time - profiling::time::NowMicros() * 1e-6;
      next_run_finish_time += manual_inter_run_gap;
    }
    // Negative sleep values are treated as "no delay" by SleepForSeconds.
    util::SleepForSeconds(inter_run_sleep_time);
    now_us = profiling::time::NowMicros();

    if (status != kTfLiteOk) {
      *invoke_status = status;
    }
  }

  std::stringstream stream;
  run_stats.OutputToStream(&stream);
  TFLITE_LOG(INFO) << stream.str() << std::endl;

  return run_stats;
}

// Sanity-checks parameter combinations, falling back to defaults (with a
// warning) rather than failing where possible.
TfLiteStatus BenchmarkModel::ValidateParams() {
  if (params_.Get<bool>("report_peak_memory_footprint")) {
    const int32_t interval =
        params_.Get<int32_t>("memory_footprint_check_interval_ms");
    if (interval <= 0) {
      TFLITE_LOG(WARN) << "--memory_footprint_check_interval_ms is set to "
                       << interval
                       << " (ms), This value is invalid, and it will be set to "
                          "the default value "
                       << kMemoryCheckIntervalMs << " (ms).";
      params_.Set<int32_t>("memory_footprint_check_interval_ms",
                           kMemoryCheckIntervalMs);
    }
  }
  return kTfLiteOk;
}

TfLiteStatus BenchmarkModel::Run(int argc, char** argv) {
  TF_LITE_ENSURE_STATUS(ParseFlags(argc, argv));
  return Run();
}

// Full benchmark pipeline: validate, init, warmup runs, measured runs, and
// memory accounting around the whole sequence.
TfLiteStatus BenchmarkModel::Run() {
  TF_LITE_ENSURE_STATUS(ValidateParams());

  LogParams();

  // Start the peak-memory monitor (if requested) before any model work so
  // initialization memory is captured too.
  auto peak_memory_reporter = MayCreateMemoryUsageMonitor();
  if (peak_memory_reporter != nullptr) peak_memory_reporter->Start();

  const double model_size_mb = MayGetModelFileSize() / 1e6;
  const auto start_mem_usage = profiling::memory::GetMemoryUsage();
  int64_t initialization_start_us = profiling::time::NowMicros();
  TF_LITE_ENSURE_STATUS(Init());
  const auto init_end_mem_usage = profiling::memory::GetMemoryUsage();
  int64_t initialization_end_us = profiling::time::NowMicros();
  int64_t startup_latency_us = initialization_end_us - initialization_start_us;
  const auto init_mem_usage = init_end_mem_usage - start_mem_usage;

  if (model_size_mb > 0) {
    TFLITE_LOG(INFO) << "The input model file size (MB): " << model_size_mb;
  } else {
    TFLITE_LOG(WARN) << "Failed to get the input model file size.";
  }
  TFLITE_LOG(INFO) << "Initialized session in " << startup_latency_us / 1e3
                   << "ms.";

  TF_LITE_ENSURE_STATUS(PrepareInputData());

  TfLiteStatus status = kTfLiteOk;
  uint64_t input_bytes = ComputeInputBytes();
  if (params_.Get<bool>("dry_run")) {
    // Zero out all run counts/durations so no kernels are invoked.
    params_.Set("warmup_runs", 0);
    params_.Set("warmup_min_secs", -1.0f);
    params_.Set("num_runs", 0);
    params_.Set("min_secs", -1.0f);
  }
  listeners_.OnBenchmarkStart(params_);
  Stat<int64_t> warmup_time_us =
      Run(params_.Get<int32_t>("warmup_runs"),
          params_.Get<float>("warmup_min_secs"), params_.Get<float>("max_secs"),
          WARMUP, &status);
  if (status != kTfLiteOk) {
    return status;
  }
  Stat<int64_t> inference_time_us =
      Run(params_.Get<int32_t>("num_runs"), params_.Get<float>("min_secs"),
          params_.Get<float>("max_secs"), REGULAR, &status);
  const auto overall_mem_usage =
      profiling::memory::GetMemoryUsage() - start_mem_usage;

  float peak_mem_mb = profiling::memory::MemoryUsageMonitor::kInvalidMemUsageMB;
  if (peak_memory_reporter != nullptr) {
    peak_memory_reporter->Stop();
    peak_mem_mb = peak_memory_reporter->GetPeakMemUsageInMB();
  }

  listeners_.OnBenchmarkEnd({model_size_mb, startup_latency_us, input_bytes,
                             warmup_time_us, inference_time_us, init_mem_usage,
                             overall_mem_usage, peak_mem_mb});
  return status;
}

// Parses command-line flags into params_; prints usage and fails on parse
// errors or --help. Leftover (unconsumed) flags are logged, not rejected.
TfLiteStatus BenchmarkModel::ParseFlags(int* argc, char** argv) {
  auto flag_list = GetFlags();
  const bool parse_result =
      Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
  if (!parse_result ||
      (params_.HasParam("help") && params_.Get<bool>("help"))) {
    std::string usage = Flags::Usage(argv[0], flag_list);
    TFLITE_LOG(ERROR) << usage;
    return kTfLiteError;
  }

  std::string unconsumed_args =
      Flags::ArgsToString(*argc, const_cast<const char**>(argv));
  if (!unconsumed_args.empty()) {
    TFLITE_LOG(WARN) << "Unconsumed cmdline flags: " << unconsumed_args;
  }

  return kTfLiteOk;
}

// Returns a memory-usage monitor when --report_peak_memory_footprint is set,
// otherwise nullptr.
std::unique_ptr<profiling::memory::MemoryUsageMonitor>
BenchmarkModel::MayCreateMemoryUsageMonitor() const {
  if (!params_.Get<bool>("report_peak_memory_footprint")) return nullptr;
  return std::make_unique<profiling::memory::MemoryUsageMonitor>(
      params_.Get<int32_t>("memory_footprint_check_interval_ms"));
}

}  // namespace benchmark
}  // namespace tflite
// Tests for the TF (non-lite) benchmark_model tool: builds a tiny
// Placeholder->MatMul graph, serializes it (binary and text proto), and
// verifies InitializeSession + TimeMultipleRuns execute the requested number
// of runs.
#include "tensorflow/tools/benchmark/benchmark_model.h"

#include <memory>

#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"

namespace tensorflow {
namespace {

// Builds a 400x10 float Placeholder multiplied by a constant 10x400 matrix of
// 3.0s. Fills in `input` (shape/dtype/name) and `output_name` for the caller
// and serializes the graph into `graph_def`.
void CreateTestGraph(const ::tensorflow::Scope& root,
                     benchmark_model::InputLayerInfo* input,
                     string* output_name, GraphDef* graph_def) {
  const int input_width = 400;
  const int input_height = 10;
  input->shape = TensorShape({input_width, input_height});
  input->data_type = DT_FLOAT;
  const TensorShape constant_shape({input_height, input_width});

  Tensor constant_tensor(DT_FLOAT, constant_shape);
  test::FillFn<float>(&constant_tensor, [](int) -> float { return 3.0; });

  auto placeholder =
      ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input->shape));
  input->name = placeholder.node()->name();
  auto m = ops::MatMul(root, placeholder, constant_tensor);
  *output_name = m.node()->name();
  TF_ASSERT_OK(root.ToGraphDef(graph_def));
}

// Binary-serialized (.pb) graph: session loads and runs exactly 10 times.
TEST(BenchmarkModelTest, InitializeAndRun) {
  const string dir = testing::TmpDir();
  const string filename_pb = io::JoinPath(dir, "graphdef.pb");
  auto root = Scope::NewRootScope().ExitOnError();
  benchmark_model::InputLayerInfo input;
  string output_name;
  GraphDef graph_def;
  CreateTestGraph(root, &input, &output_name, &graph_def);
  string graph_def_serialized;
  graph_def.SerializeToString(&graph_def_serialized);
  TF_ASSERT_OK(
      WriteStringToFile(Env::Default(), filename_pb, graph_def_serialized));

  std::unique_ptr<Session> session;
  std::unique_ptr<GraphDef> loaded_graph_def;
  TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_pb, &session,
                                                  &loaded_graph_def));
  std::unique_ptr<StatSummarizer> stats;
  stats =
      std::make_unique<tensorflow::StatSummarizer>(*(loaded_graph_def.get()));
  int64_t time;
  int64_t num_runs = 0;
  // 0.0 delay / 0.0 min-time: run exactly the requested 10 iterations.
  TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
      0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
      &time, &num_runs));
  ASSERT_EQ(num_runs, 10);
}

// Text-proto (.pb.txt) graph: same pipeline through the text-proto loader.
// NOTE(review): suite name is missing the trailing 'l' ("BenchmarkModeTest");
// kept as-is since it is an identifier, not behavior.
TEST(BenchmarkModeTest, TextProto) {
  const string dir = testing::TmpDir();
  const string filename_txt = io::JoinPath(dir, "graphdef.pb.txt");
  auto root = Scope::NewRootScope().ExitOnError();
  benchmark_model::InputLayerInfo input;
  string output_name;
  GraphDef graph_def;
  CreateTestGraph(root, &input, &output_name, &graph_def);
  TF_ASSERT_OK(WriteTextProto(Env::Default(), filename_txt, graph_def));

  std::unique_ptr<Session> session;
  std::unique_ptr<GraphDef> loaded_graph_def;
  TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_txt, &session,
                                                  &loaded_graph_def));
  std::unique_ptr<StatSummarizer> stats;
  stats =
      std::make_unique<tensorflow::StatSummarizer>(*(loaded_graph_def.get()));
  int64_t time;
  int64_t num_runs = 0;
  TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
      0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
      &time, &num_runs));
  ASSERT_EQ(num_runs, 10);
}

}  // namespace
}  // namespace tensorflow
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_model.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/benchmark/benchmark_model_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
713db5ba-df5e-4ccf-870c-7ebd13752780
cpp
tensorflow/tensorflow
merge
tensorflow/tools/proto_splitter/merge.cc
tensorflow/tools/proto_splitter/merge_test.cc
#include "tensorflow/tools/proto_splitter/merge.h" #include <algorithm> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "riegeli/base/object.h" #include "riegeli/bytes/fd_reader.h" #include "riegeli/records/record_reader.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/file_system_helper.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tensorflow/tools/proto_splitter/chunk.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace tensorflow::tools::proto_splitter { using ::tensorflow::proto_splitter::ChunkedField; using ::tensorflow::proto_splitter::ChunkedMessage; using ::tensorflow::proto_splitter::ChunkInfo; using ::tensorflow::proto_splitter::ChunkMetadata; using ::tensorflow::proto_splitter::FieldIndex; using tools::proto_splitter::GetChunkMetadata; using tools::proto_splitter::GetRiegeliReader; using tools::proto_splitter::OnlyContainsPb; using tsl::protobuf::FieldDescriptor; using tsl::protobuf::Message; using tsl::protobuf::Reflection; absl::Status Merger::Merge(const std::vector<std::unique_ptr<Message>>& chunks, const ChunkedMessage& chunked_message, Message* merged_message) { riegeli::RecordReader<riegeli::FdReader<>> null_reader{riegeli::kClosed}; if (chunked_message.has_chunk_index()) { merged_message->MergeFrom(*chunks[chunked_message.chunk_index()].get()); } for (const auto& chunked_field : chunked_message.chunked_fields()) { absl::Status s = ProcessField(chunked_field, merged_message, {}, chunks, null_reader, MergerOp::MERGE); if (!s.ok()) return s; } return absl::OkStatus(); } absl::Status Merger::Read(std::string prefix, Message* merged_message) { uint64_t start_time = Env::Default()->NowMicros(); TF_ASSIGN_OR_RETURN(bool 
only_contains_pb, OnlyContainsPb(prefix)); if (only_contains_pb) { return ReadPb(absl::StrCat(prefix, ".pb"), merged_message); } TF_ASSIGN_OR_RETURN(auto reader, GetRiegeliReader(absl::StrCat(prefix, ".cpb"))); auto read_metadata = GetChunkMetadata(reader); if (!read_metadata.ok()) { reader.Close(); return absl::FailedPreconditionError( absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n", read_metadata.status().ToString())); } ChunkMetadata chunk_metadata = read_metadata.value(); std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>( chunk_metadata.chunks().begin(), chunk_metadata.chunks().end()); absl::Status s = ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message); reader.Close(); uint64_t end_time = Env::Default()->NowMicros(); LOG(INFO) << "Finished reading and merging chunked proto, took " << HumanReadableDuration(end_time - start_time) << "."; return s; } absl::Status Merger::ReadPartial(absl::string_view prefix, const ChunkMetadata& chunk_metadata, Message* merged_message) { uint64_t start_time = Env::Default()->NowMicros(); TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix)); if (only_contains_pb) { return absl::FailedPreconditionError( absl::StrCat("Attempting to read part of a chunked proto .cpb file, " "but only found a regular proto: ", prefix, ".pb")); } TF_ASSIGN_OR_RETURN(auto reader, GetRiegeliReader(absl::StrCat(prefix, ".cpb"))); std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>( chunk_metadata.chunks().begin(), chunk_metadata.chunks().end()); absl::Status s = ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message); reader.Close(); uint64_t end_time = Env::Default()->NowMicros(); LOG(INFO) << "Finished reading and merging chunked proto, took " << HumanReadableDuration(end_time - start_time) << "."; return s; } absl::Status Merger::ReadPb(const std::string& pb_file, Message* merged_message) { uint64_t start_time = Env::Default()->NowMicros(); TF_ASSIGN_OR_RETURN(bool 
file_exists, internal::FileExists(Env::Default(), pb_file)); if (!file_exists) return absl::NotFoundError(absl::StrCat("File not found: ", pb_file)); LOG(INFO) << "Reading binary proto from " << pb_file; auto ret = ReadBinaryProto(Env::Default(), pb_file, merged_message); uint64_t end_time = Env::Default()->NowMicros(); LOG(INFO) << "Finished reading binary proto, took " << HumanReadableDuration(end_time - start_time) << "."; return ret; } absl::Status Merger::ReadFields( const ChunkedMessage& chunked_message, riegeli::RecordReader<riegeli::FdReader<>>& reader, const std::vector<ChunkInfo>& chunks_info, tsl::protobuf::Message* merged_message) { if (chunked_message.has_chunk_index()) { TF_ASSIGN_OR_RETURN( std::string chunk, ReadChunk(reader, chunks_info[chunked_message.chunk_index()])); if (!merged_message->MergeFromString(chunk)) { return absl::FailedPreconditionError( "Couldn't merge chunk into message."); } } std::vector<ChunkedField> chunked_fields( chunked_message.chunked_fields().begin(), chunked_message.chunked_fields().end()); absl::Status sort_status = absl::OkStatus(); std::sort( chunked_fields.begin(), chunked_fields.end(), [&sort_status](ChunkedField cf1, ChunkedField cf2) { int tag_depth = std::min(cf1.field_tag().size(), cf2.field_tag().size()); for (int depth = 0; depth < tag_depth; ++depth) { FieldIndex tag1 = cf1.field_tag()[depth]; FieldIndex tag2 = cf2.field_tag()[depth]; if (tag1.has_field() && tag2.has_field()) { uint32_t field1 = tag1.field(); uint32_t field2 = tag2.field(); if (field1 != field2) return field1 < field2; } else if (tag1.has_index() && tag2.has_index()) { uint64_t index1 = tag1.index(); uint64_t index2 = tag2.index(); if (index1 != index2) return index1 < index2; } else if (tag1.has_map_key() && tag2.has_map_key()) { return false; } else { sort_status = absl::FailedPreconditionError("Field tag mismatch"); return false; } } if (cf1.field_tag().size() == cf2.field_tag().size()) { return cf1.message().chunk_index() < 
cf2.message().chunk_index(); } return cf1.field_tag().size() < cf2.field_tag().size(); }); if (!sort_status.ok()) return sort_status; for (const auto& chunked_field : chunked_fields) { absl::Status s = ProcessField(chunked_field, merged_message, chunks_info, {}, reader, MergerOp::READ); if (!s.ok()) return s; } return absl::OkStatus(); } absl::Status Merger::ProcessField( const ChunkedField& chunked_field, Message* merged_message, const std::vector<ChunkInfo>& chunks_info, const std::vector<std::unique_ptr<Message>>& chunks, riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op) { std::string chunk; switch (op) { case MergerOp::READ: { TF_ASSIGN_OR_RETURN( chunk, ReadChunk(reader, chunks_info[chunked_field.message().chunk_index()])); break; } case MergerOp::MERGE: { chunk = chunks[chunked_field.message().chunk_index()]->SerializeAsString(); break; } } if (chunked_field.field_tag().empty()) { merged_message->MergeFromString(chunk); return absl::OkStatus(); } uint64_t field_index; Message* curr_message = merged_message; TF_ASSIGN_OR_RETURN(const std::vector<Field> fields, GetFieldTypes(chunked_field.field_tag())); const FieldDescriptor* field_desc = nullptr; for (const auto& field : fields) { merged_message = curr_message; field_desc = merged_message->GetDescriptor()->FindFieldByNumber( std::get<int>(field.first)); auto res = GetMutableField(merged_message, field); if (!res.ok()) { if (!absl::IsNotFound(res.status())) return res.status(); if (field_desc->is_map()) { TF_RETURN_IF_ERROR( AddMapEntry(curr_message, field_desc, field.second.value())); res = GetMutableField(curr_message, field); } else { curr_message->GetReflection()->AddMessage(curr_message, field_desc); res = GetMutableField(curr_message, field); } } auto [parent, mutable_field, mutable_field_index] = res.value(); if (mutable_field->is_repeated() && mutable_field_index != -1) { field_index = mutable_field_index; curr_message = parent->GetReflection()->MutableRepeatedMessage( parent, 
mutable_field, std::max(0, mutable_field_index)); if (mutable_field->is_map()) { field_desc = mutable_field->message_type()->FindFieldByNumber(2); merged_message = curr_message; curr_message = curr_message->GetReflection()->MutableMessage( curr_message, field_desc); } } else if (mutable_field->type() == FieldDescriptor::Type::TYPE_MESSAGE) { curr_message = parent->GetReflection()->MutableMessage(parent, mutable_field); } } const Reflection* reflection = merged_message->GetReflection(); if (field_desc->is_repeated()) { auto message_callback = [&reflection, &merged_message, &field_index, &op, &chunks, &chunked_field, &reader, &chunks_info, &field_desc]() -> absl::Status { for (int _ = reflection->FieldSize(*merged_message, field_desc); _ <= field_index; _++) { reflection->AddMessage(merged_message, field_desc); } switch (op) { case MergerOp::MERGE: TF_RETURN_IF_ERROR( Merge(chunks, chunked_field.message(), reflection->MutableRepeatedMessage( merged_message, field_desc, field_index))); break; case MergerOp::READ: TF_RETURN_IF_ERROR( ReadFields(chunked_field.message(), reader, chunks_info, reflection->MutableRepeatedMessage( merged_message, field_desc, field_index))); break; default: return absl::InternalError("Encountered unknown MergerOp."); } return absl::OkStatus(); }; TF_RETURN_IF_ERROR(SetRepeatedFieldElement( merged_message, field_desc, field_index, chunk, message_callback)); } else { auto message_callback = [&reflection, &merged_message, &op, &chunks, &chunked_field, &reader, &chunks_info, &field_desc]() -> absl::Status { switch (op) { case MergerOp::MERGE: TF_RETURN_IF_ERROR( Merge(chunks, chunked_field.message(), reflection->MutableMessage(merged_message, field_desc))); break; case MergerOp::READ: TF_RETURN_IF_ERROR(ReadFields( chunked_field.message(), reader, chunks_info, reflection->MutableMessage(merged_message, field_desc))); break; default: return absl::InternalError("Encountered unknown MergerOp."); } return absl::OkStatus(); }; TF_RETURN_IF_ERROR( 
SetFieldElement(merged_message, field_desc, chunk, message_callback)); } return absl::OkStatus(); } }
#include "tensorflow/tools/proto_splitter/merge.h" #include <array> #include <memory> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_cat.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/saved_model.pb.h" #include "tensorflow/tools/proto_splitter/cc/test_util.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tensorflow/tools/proto_splitter/chunk.pb.h" #include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace tensorflow::tools::proto_splitter { namespace { inline constexpr std::array kDFSplitTreeChunks = { "val: \"0\"", "val: \"010\"", "val: \"01020\"", "val: \"0102030\"", "val: \"0102031\"", "val: \"0102032\"", "val: \"01021\"", "val: \"0102130\"", "val: \"0102131\"", "val: \"0102132\""}; inline constexpr std::array kBFSplitTreeChunks = { "val: \"0\"", "val: \"010\"", "val: \"01020\"", "val: \"01021\"", "val: \"0102030\"", "val: \"0102031\"", "val: \"0102032\"", "val: \"0102130\"", "val: \"0102131\"", "val: \"0102132\""}; TEST(MergeTest, TestReadRiegeliTreeDepthFirst) { const std::string cpb_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "df-split-tree"); ::tensorflow::proto_splitter_testdata::StringNode merged_tree; TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree)); const std::string pbtxt_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-tree"); ::tensorflow::proto_splitter_testdata::StringNode test_proto; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto)); ASSERT_THAT(merged_tree, EqualsProto(test_proto)); } TEST(MergeTest, 
TestReadRiegeliTreeBreadthFirst) { const std::string cpb_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "bf-split-tree"); ::tensorflow::proto_splitter_testdata::StringNode merged_tree; TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree)); const std::string pbtxt_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-tree"); ::tensorflow::proto_splitter_testdata::StringNode test_proto; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto)); ASSERT_THAT(merged_tree, EqualsProto(test_proto)); } TEST(MergeTest, TestMergeTreeChunksDepthFirst) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "df-split-tree"); std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks; for (const auto& chunk : kDFSplitTreeChunks) { ::tensorflow::proto_splitter_testdata::StringNode string_node; ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node); std::unique_ptr<::tsl::protobuf::Message> node = std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>( string_node); chunks.push_back(std::move(node)); } std::string split_tree_metadata; TF_ASSERT_OK(tsl::ReadFileToString( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata)); ::tensorflow::proto_splitter::ChunkedMessage chunked_message; ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata, &chunked_message); ::tensorflow::proto_splitter_testdata::StringNode merged_tree; TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree)); const std::string pbtxt_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-tree"); ::tensorflow::proto_splitter_testdata::StringNode test_proto; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto)); ASSERT_THAT(merged_tree, EqualsProto(test_proto)); } TEST(MergeTest, 
TestMergeTreeChunksBreadthFirst) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "bf-split-tree"); std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks; for (const auto& chunk : kBFSplitTreeChunks) { ::tensorflow::proto_splitter_testdata::StringNode string_node; ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node); std::unique_ptr<::tsl::protobuf::Message> node = std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>( string_node); chunks.push_back(std::move(node)); } std::string split_tree_metadata; TF_ASSERT_OK(tsl::ReadFileToString( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata)); ::tensorflow::proto_splitter::ChunkedMessage chunked_message; ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata, &chunked_message); ::tensorflow::proto_splitter_testdata::StringNode merged_tree; TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree)); const std::string pbtxt_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-tree"); ::tensorflow::proto_splitter_testdata::StringNode test_proto; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto)); ASSERT_THAT(merged_tree, EqualsProto(test_proto)); } TEST(MergeTest, TestReadGraphDefLotsNodes) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-lots-nodes"); GraphDef merged_graph_def; TF_ASSERT_OK(Merger::Read(path, &merged_graph_def)); GraphDef test_graph_def; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def)); ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def)); } TEST(MergeTest, TestReadGraphDefLargeNodes) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-large-nodes"); GraphDef merged_graph_def; TF_ASSERT_OK(Merger::Read(path, 
&merged_graph_def)); GraphDef test_graph_def; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def)); ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def)); } TEST(MergeTest, TestReadGraphDefLargeConstant) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-large-constant"); GraphDef merged_graph_def; TF_ASSERT_OK(Merger::Read(path, &merged_graph_def)); GraphDef test_graph_def; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def)); ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def)); } TEST(MergeTest, TestReadManyField) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field"); ::tensorflow::proto_splitter_testdata::ManyFields merged_many_field; TF_ASSERT_OK(Merger::Read(path, &merged_many_field)); ::tensorflow::proto_splitter_testdata::ManyFields test_many_field; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_many_field)); ASSERT_THAT(merged_many_field, EqualsProto(test_many_field)); } TEST(MergeTest, TestReadSavedModel) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-standard"); SavedModel merged_saved_model; TF_ASSERT_OK(Merger::Read(path, &merged_saved_model)); SavedModel test_saved_model; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model)); ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model)); } TEST(MergeTest, TestReadChunkedModel) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "chunked_saved_model/chunked_model/saved_model"); SavedModel merged_saved_model; TF_ASSERT_OK(Merger::Read(path, &merged_saved_model)); SavedModel test_saved_model; TF_ASSERT_OK(tsl::ReadTextProto( tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), 
&test_saved_model)); ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model)); } TEST(MergeTest, TestReadPartial) { const std::string path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field"); TF_ASSERT_OK_AND_ASSIGN(auto reader, tools::proto_splitter::GetRiegeliReader( absl::StrCat(path, ".cpb"))); auto read_metadata = GetChunkMetadata(reader); if (!read_metadata.ok()) { reader.Close(); TF_ASSERT_OK(read_metadata.status()); } ::tensorflow::proto_splitter::ChunkMetadata chunk_metadata = read_metadata.value(); ::tensorflow::proto_splitter::ChunkMetadata partial_chunk_metadata; partial_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks()); partial_chunk_metadata.mutable_message()->set_chunk_index( chunk_metadata.message().chunk_index()); proto_splitter_testdata::ManyFields merged_many_fields; TF_ASSERT_OK( Merger::ReadPartial(path, partial_chunk_metadata, &merged_many_fields)); ASSERT_THAT(merged_many_fields, EqualsProto(R"pb( map_field_int64 { key: -1345 value: "map_value_-1345" } )pb")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/merge.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/merge_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ef944928-2288-4478-a959-a120c27ef729
cpp
tensorflow/tensorflow
graph_def_splitter
tensorflow/tools/proto_splitter/cc/graph_def_splitter.cc
tensorflow/tools/proto_splitter/cc/graph_def_splitter_test.cc
#include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h" #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/tools/proto_splitter/cc/composable_splitter.h" #include "tensorflow/tools/proto_splitter/cc/composable_splitter_base.h" #include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h" #include "tensorflow/tools/proto_splitter/cc/max_size.h" #include "tensorflow/tools/proto_splitter/cc/repeated_field_splitter.h" #include "tensorflow/tools/proto_splitter/cc/size_splitter.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace tensorflow::tools::proto_splitter { namespace { using namespace std::string_literals; class ConstantSplitter : public SizeSplitter { public: using SizeSplitter::SizeSplitter; absl::StatusOr<int> BuildChunksReturnSize() override { NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message()); std::vector<FieldType> tensor_field = {"attr"s, "value"s, "tensor"s}; std::vector<FieldType> content_field = {"attr"s, "value"s, "tensor"s, "tensor_content"s}; TF_ASSIGN_OR_RETURN(auto ret, GetMutableField(node, tensor_field)); auto tensor_msg = ret.parent->GetReflection()->MutableMessage(ret.parent, ret.field); TensorProto* tensor_proto = tsl::protobuf::DynamicCastToGenerated<TensorProto>(tensor_msg); int size_diff; if (tensor_proto->tensor_content().empty()) { Tensor t; if (!t.FromProto(*tensor_proto)) { return 
absl::InvalidArgumentError( "Invalid Const NodeDef.attr[\"value\"].tensor value."); } TensorProto container; t.AsProtoTensorContent(&container); size_diff = container.tensor_content().size(); auto x = std::make_unique<std::string>( std::move(*container.mutable_tensor_content())); auto y = std::make_unique<MessageBytes>(std::move(*x)); TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field)); } else { size_diff = tensor_proto->tensor_content().size(); auto x = std::make_unique<std::string>( std::move(*tensor_proto->mutable_tensor_content())); auto y = std::make_unique<MessageBytes>(std::move(*x)); TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field)); } auto dtype = tensor_proto->dtype(); auto tensor_shape = tensor_proto->tensor_shape(); auto version_number = tensor_proto->version_number(); tensor_proto->Clear(); tensor_proto->set_dtype(dtype); *tensor_proto->mutable_tensor_shape() = tensor_shape; tensor_proto->set_version_number(version_number); return size_diff; } }; class ConstantSplitterFactory : public SizeSplitterFactory { public: using SizeSplitterFactory::SizeSplitterFactory; absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter( tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter, std::vector<FieldType>* fields_in_parent, int size) override { if (size < GetMaxSize()) return nullptr; NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message); if (node->op() != "Const") return absl::UnimplementedError(absl::StrCat( "Currently only able to split 'Const' nodes that are larger than the " "2GB maximum proto size. 
Got node of type '", node->op(), "' with size: ", size, ".")); ConstantSplitter* splitter = new ConstantSplitter(message, parent_splitter, fields_in_parent); return absl::WrapUnique(splitter); } }; class FunctionDefSplitter : public SizeSplitter { public: using SizeSplitter::SizeSplitter; absl::StatusOr<int> BuildChunksReturnSize() override { size_t current_size = GetInitialSize(); uint64_t max_size = GetMaxSize(); std::vector<FieldType> fields = {}; if (LARGE_SIZE_CHECK(current_size, max_size) && current_size < max_size) { auto splitter = LargeNodeSplitter<FunctionDef>(message(), this, &fields); splitter.SetInitialSize(current_size); return splitter.BuildChunksReturnSize(); } else if (current_size > max_size) { ConstantSplitterFactory constant_splitter_factory; LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory; std::vector<SizeSplitterFactory*> factories = { &constant_splitter_factory, &large_node_splitter_factory}; auto ret = RepeatedFieldSplitter<FunctionDef, NodeDef>::Create( message(), this, &fields, "node_def"s, &factories); if (!ret.ok()) return ret.status(); auto splitter = ret.value(); return splitter.BuildChunksReturnSize(); } return 0; } }; class FunctionDefSplitterFactory : public SizeSplitterFactory { public: using SizeSplitterFactory::SizeSplitterFactory; absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter( tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter, std::vector<FieldType>* fields_in_parent, int size) override { FunctionDefSplitter* splitter = new FunctionDefSplitter(message, parent_splitter, fields_in_parent); return absl::WrapUnique(splitter); } }; } absl::Status GraphDefSplitter::BuildChunks() { TF_RETURN_IF_ERROR(SetMessageAsBaseChunk()); GraphDef* g = tsl::protobuf::DynamicCastToGenerated<GraphDef>(message()); uint64_t max_size = GetMaxSize(); size_t graph_size = GetInitialSize(); if (graph_size < max_size) return absl::OkStatus(); std::vector<FieldType> field_in_parent = {}; 
ConstantSplitterFactory constant_splitter_factory; LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory; std::vector<SizeSplitterFactory*> factories = {&constant_splitter_factory, &large_node_splitter_factory}; auto node_splitter_ret = RepeatedFieldSplitter<GraphDef, NodeDef>::Create( g, this, &field_in_parent, "node"s, &factories); if (!node_splitter_ret.ok()) return node_splitter_ret.status(); auto node_splitter = node_splitter_ret.value(); FunctionDefSplitterFactory function_splitter_factory; std::vector<FieldType> library_field = {"library"s}; std::vector<SizeSplitterFactory*> fn_factories = {&function_splitter_factory}; auto library_splitter_ret = RepeatedFieldSplitter<FunctionDefLibrary, FunctionDef>::Create( g->mutable_library(), this, &library_field, "function"s, &fn_factories); if (!library_splitter_ret.ok()) return library_splitter_ret.status(); auto library_splitter = library_splitter_ret.value(); size_t library_size = g->library().ByteSizeLong(); library_splitter.SetInitialSize(library_size); size_t approx_node_size = graph_size - library_size; node_splitter.SetInitialSize(approx_node_size); if (library_size > approx_node_size) { TF_ASSIGN_OR_RETURN(int size_diff, library_splitter.BuildChunksReturnSize()); library_size -= size_diff; if (approx_node_size + library_size > max_size) { TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize()); approx_node_size -= size_diff; } } else { TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize()); approx_node_size -= size_diff; if (approx_node_size + library_size > max_size) { TF_ASSIGN_OR_RETURN(int size_diff, library_splitter.BuildChunksReturnSize()); library_size -= size_diff; } } if (g->ByteSizeLong() > max_size) { LargeNodeSplitter<FunctionDefLibrary> entire_library_splitter( g->mutable_library(), this, &library_field); int index = 1; entire_library_splitter.SetChunkIndex(&index); TF_RETURN_IF_ERROR(entire_library_splitter.BuildChunks()); } return absl::OkStatus(); } 
}
#include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h" #include <cstdint> #include <memory> #include <string> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/cord.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/tools/proto_splitter/cc/max_size.h" #include "tensorflow/tools/proto_splitter/cc/test_util.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tools::proto_splitter { namespace { using ::tensorflow::proto_splitter::ChunkedMessage; TEST(GraphDefSplitterTest, TestLargeConstant) { GraphDef proto; const std::string graph_def_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-large-constant.pb"); int64_t max_size = 500; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSizeLong(), GetMaxSize()); std::string large_constant_1, large_constant_2; const std::variant<std::string, absl::Cord>& tensor_constant_1 = proto.node(2).attr().at("value").tensor().tensor_content(); const std::variant<std::string, absl::Cord>& tensor_constant_2 = proto.node(4).attr().at("value").tensor().tensor_content(); if (std::holds_alternative<std::string>(tensor_constant_1)) { large_constant_1 = std::get<std::string>(tensor_constant_1); } else { absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_1), &large_constant_1); } if 
(std::holds_alternative<std::string>(tensor_constant_2)) { large_constant_2 = std::get<std::string>(tensor_constant_2); } else { absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_2), &large_constant_2); } GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); ChunkedMessage* chunked_message = x.chunked_message; ASSERT_NE(chunked_message, nullptr); EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0 chunked_fields { field_tag { field: 1 } field_tag { index: 2 } field_tag { field: 5 } field_tag { map_key { s: "value" } } field_tag { field: 8 } field_tag { field: 4 } message { chunk_index: 1 } } chunked_fields { field_tag { field: 1 } field_tag { index: 4 } field_tag { field: 5 } field_tag { map_key { s: "value" } } field_tag { field: 8 } field_tag { field: 4 } message { chunk_index: 2 } })pb")); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); EXPECT_THAT((*chunks)[1], ::testing::VariantWith<std::string>(large_constant_1)); EXPECT_THAT((*chunks)[2], ::testing::VariantWith<std::string>(large_constant_2)); } TEST(GraphDefSplitterTest, TestLargeNodes) { GraphDef proto; const std::string graph_def_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-large-nodes.pb"); int64_t max_size = 200; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSize(), GetMaxSize()); NodeDef node_1 = proto.node(1); NodeDef node_2 = proto.node(2); NodeDef node_3 = proto.node(3); NodeDef node_5 = proto.node(5); GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); ChunkedMessage* chunked_message = x.chunked_message; ASSERT_NE(chunked_message, nullptr); EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0 chunked_fields { field_tag { field: 1 } field_tag { index: 1 } message { chunk_index: 1 } } chunked_fields { 
field_tag { field: 1 } field_tag { index: 2 } message { chunk_index: 2 } } chunked_fields { field_tag { field: 1 } field_tag { index: 3 } message { chunk_index: 3 } } chunked_fields { field_tag { field: 1 } field_tag { index: 5 } message { chunk_index: 4 } })pb")); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( (*chunks)[1])); EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( (*chunks)[2])); EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( (*chunks)[3])); EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( (*chunks)[4])); EXPECT_THAT( *std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[1]).get(), EqualsProto(node_1)); EXPECT_THAT( *std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[2]).get(), EqualsProto(node_2)); EXPECT_THAT( *std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[3]).get(), EqualsProto(node_3)); EXPECT_THAT( *std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[4]).get(), EqualsProto(node_5)); } TEST(GraphDefSplitterTest, TestLotsNodes) { GraphDef proto; const std::string graph_def_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "split-lots-nodes.pb"); int64_t max_size = 96 * 5; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSize(), GetMaxSize()); int expected_node_size = proto.node_size(); GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); ChunkedMessage* chunked_message = x.chunked_message; ASSERT_NE(chunked_message, nullptr); EXPECT_THAT( *chunked_message, EqualsProto(R"pb(chunk_index: 0 chunked_fields { message { chunk_index: 1 } } chunked_fields { message { chunk_index: 2 } } chunked_fields { message { chunk_index: 3 } } 
chunked_fields { message { chunk_index: 4 } })pb")); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); int actual_node_size = 0; for (MessageBytes& chunk : *chunks) { GraphDef* message = nullptr; if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( chunk)) { message = tsl::protobuf::DynamicCastToGenerated<GraphDef>( std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk).get()); } else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) { message = tsl::protobuf::DynamicCastToGenerated<GraphDef>( std::get<tsl::protobuf::Message*>(chunk)); } else { EXPECT_FALSE(std::holds_alternative<std::string>(chunk)); } actual_node_size += message->node_size(); } EXPECT_EQ(actual_node_size, expected_node_size); } TEST(GraphDefSplitterTest, TestFunctionLotsOfNodes) { GraphDef proto; const std::string graph_def_path = io::JoinPath( testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "function-lots-of-nodes.pb"); int64_t max_size = 500; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSize(), GetMaxSize()); GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); } TEST(GraphDefSplitterTest, TestFunctionLargeNodes) { GraphDef proto; const std::string graph_def_path = io::JoinPath(testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "function-large-nodes.pb"); int64_t max_size = 200; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSize(), GetMaxSize()); GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); } 
TEST(GraphDefSplitterTest, TestGraphAndFunction) { GraphDef proto; const std::string graph_def_path = io::JoinPath( testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "graph-def-and-function.pb"); int64_t max_size = 200; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), graph_def_path, &proto)); EXPECT_GE(proto.ByteSize(), GetMaxSize()); GraphDefSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); std::vector<MessageBytes>* chunks = x.chunks; ASSERT_NE(chunks, nullptr); EXPECT_CHUNK_SIZES(chunks, max_size); TF_ASSERT_OK(splitter.Write("/tmp/hoi")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/graph_def_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/graph_def_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ced0654c-b312-4590-8e3b-72ba1bdfd36c
cpp
tensorflow/tensorflow
saved_model_splitter
tensorflow/tools/proto_splitter/cc/saved_model_splitter.cc
tensorflow/tools/proto_splitter/cc/saved_model_splitter_test.cc
#include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h" #include <vector> #include "absl/status/status.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/protobuf/meta_graph.pb.h" #include "tensorflow/core/protobuf/saved_model.pb.h" #include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h" #include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h" #include "tensorflow/tools/proto_splitter/cc/max_size.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/protobuf.h" namespace tensorflow { namespace tools::proto_splitter { using namespace std::string_literals; absl::Status SavedModelSplitter::BuildChunks() { TF_RETURN_IF_ERROR(SetMessageAsBaseChunk()); SavedModel* sm = tsl::protobuf::DynamicCastToGenerated<SavedModel>(message()); int max_size = GetMaxSize(); if (GetInitialSize() < max_size) return absl::OkStatus(); std::vector<FieldType> fields_to_graph_def = {"meta_graphs"s, 0, "graph_def"s}; GraphDefSplitter graph_def_splitter( sm->mutable_meta_graphs(0)->mutable_graph_def(), this, &fields_to_graph_def); TF_RETURN_IF_ERROR(graph_def_splitter.BuildChunks()); if (sm->ByteSizeLong() < max_size) return absl::OkStatus(); LargeNodeSplitter<GraphDef> entire_graph_splitter( sm->mutable_meta_graphs(0)->mutable_graph_def(), this, &fields_to_graph_def); int index = 1; entire_graph_splitter.SetChunkIndex(&index); TF_RETURN_IF_ERROR(entire_graph_splitter.BuildChunks()); return absl::OkStatus(); } } }
#include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h" #include <cstdint> #include <memory> #include <string> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/saved_model.pb.h" #include "tensorflow/tools/proto_splitter/cc/max_size.h" #include "tensorflow/tools/proto_splitter/cc/util.h" #include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tools::proto_splitter { namespace { #define EXPECT_CHUNK_SIZES(chunks, max_size) \ do { \ for (auto chunk : *chunks) { \ if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( \ chunk)) { \ EXPECT_LE(std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk) \ ->ByteSizeLong(), \ max_size); \ } else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) { \ EXPECT_LE(std::get<tsl::protobuf::Message*>(chunk)->ByteSizeLong(), \ max_size); \ } \ } \ } while (0) std::string NonChunkedSavedModel() { return io::JoinPath(testing::TensorFlowSrcRoot(), "cc", "saved_model", "testdata", "chunked_saved_model", "non_chunked_model", "saved_model.pb"); } TEST(SavedModelSplitterTest, TestSplit) { SavedModel proto; int64_t max_size = 80000; DebugSetMaxSize(max_size); TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(), NonChunkedSavedModel(), &proto)); EXPECT_GE(proto.ByteSizeLong(), GetMaxSize()); SavedModelSplitter splitter(&proto); TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split()); std::vector<MessageBytes>* chunks = x.chunks; 
ASSERT_NE(chunks, nullptr); EXPECT_EQ(2, chunks->size()); EXPECT_CHUNK_SIZES(chunks, max_size); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/saved_model_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/saved_model_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
eb77139f-966b-4ca7-ae72-c67ca1fd73e3
cpp
tensorflow/tensorflow
gen_proto_text_functions_lib
tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc
tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
#include "tensorflow/tools/proto_text/gen_proto_text_functions_lib.h" #include <algorithm> #include <set> #include <string> #include <unordered_set> #include "absl/strings/string_view.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/types.h" using ::tensorflow::protobuf::Descriptor; using ::tensorflow::protobuf::EnumDescriptor; using ::tensorflow::protobuf::FieldDescriptor; using ::tensorflow::protobuf::FieldOptions; using ::tensorflow::protobuf::FileDescriptor; namespace tensorflow { namespace { template <typename... Args> string StrCat(const Args&... args) { std::ostringstream s; std::vector<int> give_me_a_name{((s << args), 0)...}; return s.str(); } template <typename... Args> string StrAppend(string* to_append, const Args&... args) { *to_append += StrCat(args...); return *to_append; } class Generator { public: explicit Generator(const string& tf_header_prefix) : tf_header_prefix_(tf_header_prefix), header_(&code_.header), header_impl_(&code_.header_impl), cc_(&code_.cc) {} void Generate(const FileDescriptor& fd); ProtoTextFunctionCode code() const { return code_; } private: struct Section { explicit Section(string* str) : str(str) {} string* str; string indent; }; Generator& SetOutput(Section* section) { cur_ = section; return *this; } Generator& Nest() { StrAppend(&cur_->indent, " "); return *this; } Generator& Unnest() { cur_->indent = cur_->indent.substr(0, cur_->indent.size() - 2); return *this; } template <typename... Args> Generator& Print(Args... 
args) { StrAppend(cur_->str, cur_->indent, args..., "\n"); return *this; } void AppendFieldValueAppend(const FieldDescriptor& field, const bool omit_default, const string& field_expr); void AppendFieldAppend(const FieldDescriptor& field); void AppendDebugStringFunctions(const Descriptor& md); void AppendEnumFunctions(const EnumDescriptor& enum_d); void AppendParseMessageFunction(const Descriptor& md); void AppendMessageFunctions(const Descriptor& md); void AddNamespaceToCurrentSection(absl::string_view package, bool open); void AddHeadersToCurrentSection(const std::vector<string>& headers); const string tf_header_prefix_; ProtoTextFunctionCode code_; Section* cur_ = nullptr; Section header_; Section header_impl_; Section cc_; std::unordered_set<string> map_append_signatures_included_; Generator(const Generator&) = delete; void operator=(const Generator&) = delete; }; string GetPackageReferencePrefix(const FileDescriptor* fd) { string result = "::"; absl::string_view package = fd->package(); for (size_t i = 0; i < package.size(); ++i) { if (package[i] == '.') { result += "::"; } else { result += package[i]; } } result += "::"; return result; } string GetClassName(const Descriptor& d) { if (d.containing_type() == nullptr) return std::string(d.name()); return StrCat(GetClassName(*d.containing_type()), "_", d.name()); } string GetClassName(const EnumDescriptor& ed) { if (ed.containing_type() == nullptr) return std::string(ed.name()); return StrCat(GetClassName(*ed.containing_type()), "_", ed.name()); } string GetQualifiedName(const Descriptor& d) { return StrCat(GetPackageReferencePrefix(d.file()), GetClassName(d)); } string GetQualifiedName(const EnumDescriptor& d) { return StrCat(GetPackageReferencePrefix(d.file()), GetClassName(d)); } string GetQualifiedAppendFn(const Descriptor& d) { return StrCat(GetPackageReferencePrefix(d.file()), "internal::AppendProtoDebugString"); } string GetEnumNameFn(const EnumDescriptor& enum_d) { return StrCat("EnumName_", 
GetClassName(enum_d)); } string GetQualifiedEnumNameFn(const EnumDescriptor& enum_d) { return StrCat(GetPackageReferencePrefix(enum_d.file()), GetEnumNameFn(enum_d)); } string GetProtoTextHeaderName(const FileDescriptor& fd, bool impl) { const int dot_index = fd.name().find_last_of('.'); return StrCat(fd.name().substr(0, dot_index), (impl ? ".pb_text-impl.h" : ".pb_text.h")); } string GetProtoHeaderName(const FileDescriptor& fd) { const int dot_index = fd.name().find_last_of('.'); return StrCat(fd.name().substr(0, dot_index), ".pb.h"); } string GetCppClass(const FieldDescriptor& d) { string cpp_class = d.cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE ? GetQualifiedName(*d.message_type()) : d.cpp_type_name(); if (cpp_class == "int64") { cpp_class = kProtobufInt64Typename; } return cpp_class; } string GetHeaderGuard(const FileDescriptor& fd, bool impl) { string s(fd.name()); std::replace(s.begin(), s.end(), '/', '_'); std::replace(s.begin(), s.end(), '.', '_'); return s + (impl ? "_IMPL_H_" : "_H_"); } void Generator::AppendFieldValueAppend(const FieldDescriptor& field, const bool omit_default, const string& field_expr) { CHECK(!field.has_presence() || field.containing_oneof() != nullptr || field.cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) << field.file()->name(); SetOutput(&cc_); switch (field.cpp_type()) { case FieldDescriptor::CPPTYPE_INT32: case FieldDescriptor::CPPTYPE_INT64: case FieldDescriptor::CPPTYPE_UINT32: case FieldDescriptor::CPPTYPE_UINT64: case FieldDescriptor::CPPTYPE_DOUBLE: case FieldDescriptor::CPPTYPE_FLOAT: Print("o->", omit_default ? "AppendNumericIfNotZero" : "AppendNumeric", "(\"", field.name(), "\", ", field_expr, ");"); break; case FieldDescriptor::CPPTYPE_BOOL: Print("o->", omit_default ? 
"AppendBoolIfTrue" : "AppendBool", "(\"", field.name(), "\", ", field_expr, ");"); break; case FieldDescriptor::CPPTYPE_STRING: { const auto ctype = field.options().ctype(); CHECK(ctype == FieldOptions::CORD || ctype == FieldOptions::STRING) << "Unsupported ctype " << ctype; Print("o->", omit_default ? "AppendStringIfNotEmpty" : "AppendString", "(\"", field.name(), "\", ProtobufStringToString(", field_expr, "));"); break; } case FieldDescriptor::CPPTYPE_ENUM: if (omit_default) { Print("if (", field_expr, " != 0) {").Nest(); } Print("const char* enum_name = ", GetQualifiedEnumNameFn(*field.enum_type()), "(", field_expr, ");"); Print("if (enum_name[0]) {").Nest(); Print("o->AppendEnumName(\"", field.name(), "\", enum_name);"); Unnest().Print("} else {").Nest(); Print("o->AppendNumeric(\"", field.name(), "\", ", field_expr, ");"); Unnest().Print("}"); if (omit_default) { Unnest().Print("}"); } break; case FieldDescriptor::CPPTYPE_MESSAGE: CHECK(!field.message_type()->options().map_entry()); if (omit_default) { Print("if (msg.has_", field.name(), "()) {").Nest(); } Print("o->OpenNestedMessage(\"", field.name(), "\");"); Print(GetQualifiedAppendFn(*field.message_type()), "(o, ", field_expr, ");"); Print("o->CloseNestedMessage();"); if (omit_default) { Unnest().Print("}"); } break; } } void Generator::AppendFieldAppend(const FieldDescriptor& field) { absl::string_view name = field.name(); if (field.is_map()) { Print("{").Nest(); const auto& key_type = *field.message_type()->FindFieldByName("key"); const auto& value_type = *field.message_type()->FindFieldByName("value"); Print("std::vector<", key_type.cpp_type_name(), "> keys;"); Print("for (const auto& e : msg.", name, "()) keys.push_back(e.first);"); Print("std::stable_sort(keys.begin(), keys.end());"); Print("for (const auto& key : keys) {").Nest(); Print("o->OpenNestedMessage(\"", name, "\");"); AppendFieldValueAppend(key_type, false , "key"); AppendFieldValueAppend(value_type, false , StrCat("msg.", name, 
"().at(key)")); Print("o->CloseNestedMessage();"); Unnest().Print("}"); Unnest().Print("}"); } else if (field.is_repeated()) { Print("for (int i = 0; i < msg.", name, "_size(); ++i) {"); Nest(); AppendFieldValueAppend(field, false , StrCat("msg.", name, "(i)")); Unnest().Print("}"); } else { const auto* oneof = field.containing_oneof(); if (oneof != nullptr) { string camel_name(field.camelcase_name()); camel_name[0] = toupper(camel_name[0]); Print("if (msg.", oneof->name(), "_case() == ", GetQualifiedName(*oneof->containing_type()), "::k", camel_name, ") {"); Nest(); AppendFieldValueAppend(field, false , StrCat("msg.", name, "()")); Unnest(); Print("}"); } else { AppendFieldValueAppend(field, true , StrCat("msg.", name, "()")); } } } void Generator::AppendEnumFunctions(const EnumDescriptor& enum_d) { const string sig = StrCat("const char* ", GetEnumNameFn(enum_d), "(\n ", GetQualifiedName(enum_d), " value)"); SetOutput(&header_); Print().Print(" Print(sig, ";"); SetOutput(&cc_); Print().Print(sig, " {"); Nest().Print("switch (value) {").Nest(); for (int i = 0; i < enum_d.value_count(); ++i) { const auto& value = *enum_d.value(i); Print("case ", value.number(), ": return \"", value.name(), "\";"); } Print("default: return \"\";"); Unnest().Print("}"); Unnest().Print("}"); } void Generator::AppendParseMessageFunction(const Descriptor& md) { const bool map_append = (md.options().map_entry()); string sig; if (!map_append) { sig = StrCat("bool ProtoParseFromString(\n const string& s,\n ", GetQualifiedName(md), "* msg)"); SetOutput(&header_).Print(sig, "\n TF_MUST_USE_RESULT;"); SetOutput(&cc_); Print().Print(sig, " {").Nest(); Print("msg->Clear();"); Print("Scanner scanner(s);"); Print("if (!internal::ProtoParseFromScanner(", "&scanner, false, false, msg)) return false;"); Print("scanner.Eos();"); Print("return scanner.GetResult();"); Unnest().Print("}"); } sig = StrCat("bool ProtoParseFromScanner(", "\n ::tensorflow::strings::Scanner* scanner, bool nested, " "bool 
close_curly,\n "); const FieldDescriptor* key_type = nullptr; const FieldDescriptor* value_type = nullptr; if (map_append) { key_type = md.FindFieldByName("key"); value_type = md.FindFieldByName("value"); StrAppend(&sig, "::tensorflow::protobuf::Map<", GetCppClass(*key_type), ", ", GetCppClass(*value_type), ">* map)"); } else { StrAppend(&sig, GetQualifiedName(md), "* msg)"); } if (!map_append_signatures_included_.insert(sig).second) { return; } if (!map_append) { SetOutput(&header_impl_).Print(sig, ";"); } SetOutput(&cc_); Print().Print("namespace internal {"); if (map_append) { Print("namespace {"); } Print().Print(sig, " {").Nest(); if (map_append) { Print(GetCppClass(*key_type), " map_key;"); Print("bool set_map_key = false;"); Print(GetCppClass(*value_type), " map_value;"); Print("bool set_map_value = false;"); } Print("std::vector<bool> has_seen(", md.field_count(), ", false);"); Print("while(true) {").Nest(); Print("ProtoSpaceAndComments(scanner);"); Print("if (nested && (scanner->Peek() == (close_curly ? 
'}' : '>'))) {") .Nest(); Print("scanner->One(Scanner::ALL);"); Print("ProtoSpaceAndComments(scanner);"); if (map_append) { Print("if (!set_map_key || !set_map_value) return false;"); Print("(*map)[map_key] = map_value;"); } Print("return true;"); Unnest().Print("}"); Print("if (!nested && scanner->empty()) { return true; }"); Print("scanner->RestartCapture()"); Print(" .Many(Scanner::LETTER_DIGIT_UNDERSCORE)"); Print(" .StopCapture();"); Print("StringPiece identifier;"); Print("if (!scanner->GetResult(nullptr, &identifier)) return false;"); Print("bool parsed_colon = false;"); Print("(void)parsed_colon;"); Print("ProtoSpaceAndComments(scanner);"); Print("if (scanner->Peek() == ':') {"); Nest().Print("parsed_colon = true;"); Print("scanner->One(Scanner::ALL);"); Print("ProtoSpaceAndComments(scanner);"); Unnest().Print("}"); for (int i = 0; i < md.field_count(); ++i) { const FieldDescriptor* field = md.field(i); absl::string_view field_name = field->name(); string mutable_value_expr; string set_value_prefix; if (map_append) { mutable_value_expr = StrCat("&map_", field_name); set_value_prefix = StrCat("map_", field_name, " = "); } else if (field->is_repeated()) { if (field->is_map()) { mutable_value_expr = StrCat("msg->mutable_", field_name, "()"); set_value_prefix = "UNREACHABLE"; } else { mutable_value_expr = StrCat("msg->add_", field_name, "()"); set_value_prefix = StrCat("msg->add_", field_name); } } else { mutable_value_expr = StrCat("msg->mutable_", field_name, "()"); set_value_prefix = StrCat("msg->set_", field_name); } Print(i == 0 ? 
"" : "else ", "if (identifier == \"", field_name, "\") {"); Nest(); if (field->is_repeated()) { CHECK(!map_append); Print("const bool is_list = (scanner->Peek() == '[');"); Print("do {"); Nest().Print("if (is_list) {"); Nest().Print("scanner->One(Scanner::ALL);"); Print("ProtoSpaceAndComments(scanner);"); Unnest().Print("}"); } else if (field->containing_oneof() != nullptr) { CHECK(!map_append); const absl::string_view oneof_name = field->containing_oneof()->name(); Print("if (msg->", oneof_name, "_case() != 0) return false;"); } if (!field->is_repeated() && !map_append) { Print("if (has_seen[", i, "]) return false;"); Print("has_seen[", i, "] = true;"); } if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) { Print("const char open_char = scanner->Peek();"); Print("if (open_char != '{' && open_char != '<') return false;"); Print("scanner->One(Scanner::ALL);"); Print("ProtoSpaceAndComments(scanner);"); if (field->is_map()) { Print("if (!ProtoParseFromScanner("); } else { Print("if (!", GetPackageReferencePrefix(field->message_type()->file()), "internal::ProtoParseFromScanner("); } Print(" scanner, true, open_char == '{', ", mutable_value_expr, ")) return false;"); } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_STRING) { Print("string str_value;"); Print( "if (!parsed_colon || " "!::tensorflow::strings::ProtoParseStringLiteralFromScanner("); Print(" scanner, &str_value)) return false;"); Print("SetProtobufStringSwapAllowed(&str_value, ", mutable_value_expr, ");"); } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_ENUM) { Print("StringPiece value;"); Print( "if (!parsed_colon || " "!scanner->RestartCapture().Many(" "Scanner::LETTER_DIGIT_DASH_UNDERSCORE)." 
"GetResult(nullptr, &value)) return false;"); const auto* enum_d = field->enum_type(); string value_prefix; if (enum_d->containing_type() == nullptr) { value_prefix = GetPackageReferencePrefix(enum_d->file()); } else { value_prefix = StrCat(GetQualifiedName(*enum_d), "_"); } for (int enum_i = 0; enum_i < enum_d->value_count(); ++enum_i) { const auto* value_d = enum_d->value(enum_i); absl::string_view value_name = value_d->name(); string condition = StrCat("value == \"", value_name, "\""); Print(enum_i == 0 ? "" : "} else ", "if (", condition, ") {"); Nest(); Print(set_value_prefix, "(", value_prefix, value_name, ");"); Unnest(); } Print("} else {"); Nest(); Print("int32 int_value;"); Print("if (strings::SafeStringToNumeric(value, &int_value)) {"); Nest(); Print(set_value_prefix, "(static_cast<", GetQualifiedName(*enum_d), ">(int_value));"); Unnest(); Print("} else {").Nest().Print("return false;").Unnest().Print("}"); Unnest().Print("}"); } else { Print(field->cpp_type_name(), " value;"); switch (field->cpp_type()) { case FieldDescriptor::CPPTYPE_INT32: case FieldDescriptor::CPPTYPE_INT64: case FieldDescriptor::CPPTYPE_UINT32: case FieldDescriptor::CPPTYPE_UINT64: case FieldDescriptor::CPPTYPE_DOUBLE: case FieldDescriptor::CPPTYPE_FLOAT: Print( "if (!parsed_colon || " "!::tensorflow::strings::ProtoParseNumericFromScanner(", "scanner, &value)) return false;"); break; case FieldDescriptor::CPPTYPE_BOOL: Print( "if (!parsed_colon || " "!::tensorflow::strings::ProtoParseBoolFromScanner(", "scanner, &value)) return false;"); break; default: LOG(FATAL) << "handled earlier"; } Print(set_value_prefix, "(value);"); } if (field->is_repeated()) { Unnest().Print("} while (is_list && scanner->Peek() == ',');"); Print( "if (is_list && " "!scanner->OneLiteral(\"]\").GetResult()) return false;"); } if (map_append) { Print("set_map_", field_name, " = true;"); } Unnest().Print("}"); } Unnest().Print("}"); Unnest().Print("}"); Unnest().Print(); if (map_append) { Print("} } Print("} } 
void Generator::AppendDebugStringFunctions(const Descriptor& md) { SetOutput(&header_impl_).Print(); SetOutput(&header_).Print().Print(" string(md.full_name())); for (int short_pass = 0; short_pass < 2; ++short_pass) { const bool short_debug = (short_pass == 1); const string sig = StrCat( "string ", short_debug ? "ProtoShortDebugString" : "ProtoDebugString", "(\n const ", GetQualifiedName(md), "& msg)"); SetOutput(&header_).Print(sig, ";"); SetOutput(&cc_); Print().Print(sig, " {").Nest(); Print("string s;"); Print("::tensorflow::strings::ProtoTextOutput o(&s, ", short_debug ? "true" : "false", ");"); Print("internal::AppendProtoDebugString(&o, msg);"); Print("o.CloseTopMessage();"); Print("return s;"); Unnest().Print("}"); } const string sig = StrCat("void AppendProtoDebugString(\n", " ::tensorflow::strings::ProtoTextOutput* o,\n const ", GetQualifiedName(md), "& msg)"); SetOutput(&header_impl_).Print(sig, ";"); SetOutput(&cc_); Print().Print("namespace internal {").Print(); Print(sig, " {").Nest(); std::vector<const FieldDescriptor*> fields; fields.reserve(md.field_count()); for (int i = 0; i < md.field_count(); ++i) { fields.push_back(md.field(i)); } std::sort(fields.begin(), fields.end(), [](const FieldDescriptor* left, const FieldDescriptor* right) { return left->number() < right->number(); }); for (const FieldDescriptor* field : fields) { SetOutput(&cc_); AppendFieldAppend(*field); } Unnest().Print("}").Print().Print("} } void Generator::AppendMessageFunctions(const Descriptor& md) { if (md.options().map_entry()) { AppendParseMessageFunction(md); return; } for (int i = 0; i < md.enum_type_count(); ++i) { AppendEnumFunctions(*md.enum_type(i)); } for (int i = 0; i < md.nested_type_count(); ++i) { AppendMessageFunctions(*md.nested_type(i)); } AppendDebugStringFunctions(md); AppendParseMessageFunction(md); } void Generator::AddNamespaceToCurrentSection(absl::string_view package, bool open) { Print(); std::vector<string> parts = {""}; for (size_t i = 0; i < 
package.size(); ++i) { if (package[i] == '.') { parts.resize(parts.size() + 1); } else { parts.back() += package[i]; } } if (open) { for (const auto& p : parts) { Print("namespace ", p, " {"); } } else { for (auto it = parts.rbegin(); it != parts.rend(); ++it) { Print("} } } } void Generator::AddHeadersToCurrentSection(const std::vector<string>& headers) { std::vector<string> sorted = headers; std::sort(sorted.begin(), sorted.end()); for (const auto& h : sorted) { Print("#include \"", h, "\""); } } void GetAllFileDescriptorsFromFile(const FileDescriptor* fd, std::set<const FileDescriptor*>* all_fd, std::set<const Descriptor*>* all_d); void GetAllFileDescriptorsFromMessage(const Descriptor* d, std::set<const FileDescriptor*>* all_fd, std::set<const Descriptor*>* all_d) { if (!all_d->insert(d).second) return; GetAllFileDescriptorsFromFile(d->file(), all_fd, all_d); for (int i = 0; i < d->field_count(); ++i) { auto* f = d->field(i); switch (f->cpp_type()) { case FieldDescriptor::CPPTYPE_INT32: case FieldDescriptor::CPPTYPE_INT64: case FieldDescriptor::CPPTYPE_UINT32: case FieldDescriptor::CPPTYPE_UINT64: case FieldDescriptor::CPPTYPE_DOUBLE: case FieldDescriptor::CPPTYPE_FLOAT: case FieldDescriptor::CPPTYPE_BOOL: case FieldDescriptor::CPPTYPE_STRING: break; case FieldDescriptor::CPPTYPE_MESSAGE: GetAllFileDescriptorsFromMessage(f->message_type(), all_fd, all_d); break; case FieldDescriptor::CPPTYPE_ENUM: GetAllFileDescriptorsFromFile(f->enum_type()->file(), all_fd, all_d); break; } } for (int i = 0; i < d->nested_type_count(); ++i) { GetAllFileDescriptorsFromMessage(d->nested_type(i), all_fd, all_d); } } void GetAllFileDescriptorsFromFile(const FileDescriptor* fd, std::set<const FileDescriptor*>* all_fd, std::set<const Descriptor*>* all_d) { if (!all_fd->insert(fd).second) return; for (int i = 0; i < fd->message_type_count(); ++i) { GetAllFileDescriptorsFromMessage(fd->message_type(i), all_fd, all_d); } } void Generator::Generate(const FileDescriptor& fd) { const 
absl::string_view package = fd.package(); std::set<const FileDescriptor*> all_fd; std::set<const Descriptor*> all_d; GetAllFileDescriptorsFromFile(&fd, &all_fd, &all_d); std::vector<string> headers; SetOutput(&header_); Print(" Print("#ifndef ", GetHeaderGuard(fd, false )); Print("#define ", GetHeaderGuard(fd, false )); Print(); headers = { GetProtoHeaderName(fd), StrCat(tf_header_prefix_, "tensorflow/core/platform/macros.h"), StrCat(tf_header_prefix_, "tensorflow/core/platform/protobuf.h"), StrCat(tf_header_prefix_, "tensorflow/core/platform/types.h"), }; for (const auto& h : headers) { Print("#include \"", h, "\""); } AddNamespaceToCurrentSection(package, true ); SetOutput(&header_impl_); Print(" Print("#ifndef ", GetHeaderGuard(fd, true )); Print("#define ", GetHeaderGuard(fd, true )); Print(); headers = { GetProtoTextHeaderName(fd, false ), StrCat(tf_header_prefix_, "tensorflow/core/lib/strings/proto_text_util.h"), StrCat(tf_header_prefix_, "tensorflow/core/lib/strings/scanner.h"), }; for (const FileDescriptor* d : all_fd) { if (d != &fd) { headers.push_back(GetProtoTextHeaderName(*d, true )); } headers.push_back(GetProtoHeaderName(*d)); } AddHeadersToCurrentSection(headers); AddNamespaceToCurrentSection(package, true ); SetOutput(&header_impl_).Print().Print("namespace internal {"); SetOutput(&cc_); Print(" Print(); Print("#include <algorithm>"); Print(); headers = {GetProtoTextHeaderName(fd, true )}; AddHeadersToCurrentSection(headers); Print(); Print("using ::tensorflow::strings::ProtoSpaceAndComments;"); Print("using ::tensorflow::strings::Scanner;"); Print("using ::tensorflow::strings::StrCat;"); AddNamespaceToCurrentSection(package, true ); for (int i = 0; i < fd.enum_type_count(); ++i) { AppendEnumFunctions(*fd.enum_type(i)); } for (int i = 0; i < fd.message_type_count(); ++i) { AppendMessageFunctions(*fd.message_type(i)); } SetOutput(&header_); AddNamespaceToCurrentSection(package, false ); Print().Print("#endif SetOutput(&header_impl_).Print().Print("} 
AddNamespaceToCurrentSection(package, false ); Print().Print("#endif SetOutput(&cc_); AddNamespaceToCurrentSection(package, false ); } } ProtoTextFunctionCode GetProtoTextFunctionCode(const FileDescriptor& fd, const string& tf_header_prefix) { Generator gen(tf_header_prefix); gen.Generate(fd); return gen.code(); } }
#include "tensorflow/tools/proto_text/gen_proto_text_functions_lib.h" #include <string> #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/tools/proto_text/test.pb_text.h" #include "tensorflow/tools/proto_text/test.pb.h" namespace tensorflow { namespace test { namespace { std::string PrintShortTextFormat(const tensorflow::protobuf::Message& message) { std::string message_short_text; protobuf::TextFormat::Printer printer; printer.SetSingleLineMode(true); printer.SetExpandAny(true); printer.PrintToString(message, &message_short_text); if (!message_short_text.empty() && message_short_text[message_short_text.size() - 1] == ' ') { message_short_text.resize(message_short_text.size() - 1); } return message_short_text; } std::string PrintTextFormat(const tensorflow::protobuf::Message& message) { std::string message_text; protobuf::TextFormat::Printer printer; printer.SetExpandAny(true); printer.PrintToString(message, &message_text); return message_text; } template <typename T> T RoundtripParseProtoOrDie(const T& input, bool short_text) { const string s = short_text ? 
PrintShortTextFormat(input) : PrintTextFormat(input); T t; EXPECT_TRUE(ProtoParseFromString(s, &t)) << "Failed to parse " << s; return t; } #define EXPECT_TEXT_TRANSFORMS_MATCH() \ EXPECT_EQ(PrintTextFormat(proto), ProtoDebugString(proto)); \ EXPECT_EQ(PrintShortTextFormat(proto), ProtoShortDebugString(proto)); \ EXPECT_EQ(proto.DebugString(), \ RoundtripParseProtoOrDie(proto, true).DebugString()); \ EXPECT_EQ(proto.DebugString(), \ RoundtripParseProtoOrDie(proto, false).DebugString()); #define EXPECT_PARSE_FAILURE(str) \ EXPECT_FALSE(ProtoParseFromString(str, &proto)); \ EXPECT_FALSE(protobuf::TextFormat::ParseFromString(str, &proto)) #define EXPECT_PARSE_SUCCESS(expected, str) \ do { \ EXPECT_TRUE(ProtoParseFromString(str, &proto)); \ string proto_text_str = ProtoShortDebugString(proto); \ EXPECT_TRUE(protobuf::TextFormat::ParseFromString(str, &proto)); \ string protobuf_str = ProtoShortDebugString(proto); \ EXPECT_EQ(proto_text_str, protobuf_str); \ EXPECT_EQ(expected, proto_text_str); \ } while (false) TEST(CreateProtoDebugStringLibTest, ValidSimpleTypes) { TestAllTypes proto; proto.Clear(); proto.set_optional_int32(-1); proto.set_optional_int64(-2); proto.set_optional_uint32(3); proto.set_optional_uint64(4); proto.set_optional_sint32(-5); proto.set_optional_sint64(-6); proto.set_optional_fixed32(-7); proto.set_optional_fixed64(-8); proto.set_optional_sfixed32(-9); proto.set_optional_sfixed64(-10); proto.set_optional_float(-12.34); proto.set_optional_double(-5.678); proto.set_optional_bool(true); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_optional_int32(std::numeric_limits<int32>::max()); proto.set_optional_int64(std::numeric_limits<protobuf_int64>::max()); proto.set_optional_uint32(std::numeric_limits<uint32>::max()); proto.set_optional_uint64(std::numeric_limits<uint64>::max()); proto.set_optional_double(std::numeric_limits<double>::max()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); 
proto.set_optional_double(std::numeric_limits<double>::min()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_optional_int32(std::numeric_limits<int32>::lowest()); proto.set_optional_int64(std::numeric_limits<protobuf_int64>::lowest()); proto.set_optional_double(std::numeric_limits<double>::lowest()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_optional_double(std::numeric_limits<double>::infinity()); proto.set_optional_float(std::numeric_limits<float>::infinity()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.set_optional_double(-1 * std::numeric_limits<double>::infinity()); proto.set_optional_float(-1 * std::numeric_limits<float>::infinity()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); for (int i = 0; i < 256; ++i) { proto.mutable_optional_string()->push_back(static_cast<char>(i)); proto.mutable_optional_bytes()->push_back(static_cast<char>(i)); } strings::StrAppend(proto.mutable_optional_string(), "¢€𐍈"); proto.set_optional_cord(proto.optional_string()); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.add_repeated_int32(-1); proto.add_repeated_int32(0); proto.add_repeated_int64(0); proto.add_repeated_int64(1); proto.add_repeated_uint32(-10); proto.add_repeated_uint32(0); proto.add_repeated_uint32(10); proto.add_repeated_uint64(-20); proto.add_repeated_uint64(0); proto.add_repeated_uint64(20); proto.add_repeated_sint32(-30); proto.add_repeated_sint32(0); proto.add_repeated_sint32(30); proto.add_repeated_sint64(-40); proto.add_repeated_sint64(0); proto.add_repeated_sint64(40); proto.add_repeated_fixed32(-50); proto.add_repeated_fixed32(0); proto.add_repeated_fixed32(50); proto.add_repeated_fixed64(-60); proto.add_repeated_fixed64(0); proto.add_repeated_fixed64(60); proto.add_repeated_sfixed32(-70); proto.add_repeated_sfixed32(0); proto.add_repeated_sfixed32(70); proto.add_repeated_sfixed64(-80); proto.add_repeated_sfixed64(0); proto.add_repeated_sfixed64(80); proto.add_repeated_float(-1.2345); proto.add_repeated_float(0); 
proto.add_repeated_float(-2.3456); proto.add_repeated_double(-10.2345); proto.add_repeated_double(0); proto.add_repeated_double(-20.3456); proto.add_repeated_bool(false); proto.add_repeated_bool(true); proto.add_repeated_bool(false); proto.add_repeated_string("abc"); proto.add_repeated_string(""); proto.add_repeated_string("def"); proto.add_repeated_cord("abc"); proto.add_repeated_cord(""); proto.add_repeated_cord("def"); proto.add_packed_repeated_int64(-1000); proto.add_packed_repeated_int64(0); proto.add_packed_repeated_int64(1000); EXPECT_TEXT_TRANSFORMS_MATCH(); EXPECT_PARSE_SUCCESS("repeated_int32: 1 repeated_int32: 2 repeated_int32: 3", "repeated_int32: [1, 2 , 3]"); EXPECT_PARSE_SUCCESS(("repeated_bool: false repeated_bool: false " "repeated_bool: true repeated_bool: true " "repeated_bool: false repeated_bool: true"), "repeated_bool: [false, 0, 1, true, False, True]"); EXPECT_PARSE_SUCCESS(("repeated_string: \"a,b\" " "repeated_string: \"cdef\""), "repeated_string: [ 'a,b', 'cdef' ] "); EXPECT_PARSE_SUCCESS("optional_string: \"123\\\" \\'xyz\"", "optional_string: '123\\\" \\'xyz' "); EXPECT_PARSE_SUCCESS("optional_double: 10000", "optional_double: 1e4"); EXPECT_PARSE_FAILURE("optional_string: '1' optional_string: '2'"); EXPECT_PARSE_FAILURE("optional_double: 123 optional_double: 456"); EXPECT_PARSE_FAILURE("optional_double: 0001"); EXPECT_PARSE_FAILURE("optional_double: 000.1"); EXPECT_PARSE_FAILURE("optional_double: a"); EXPECT_PARSE_FAILURE("optional_double: x123"); EXPECT_PARSE_FAILURE("optional_double: '123'"); EXPECT_PARSE_FAILURE("optional_double: --111"); EXPECT_PARSE_FAILURE("optional_string: 'abc\""); EXPECT_PARSE_FAILURE("optional_bool: truE"); EXPECT_PARSE_FAILURE("optional_bool: FALSE"); } TEST(CreateProtoDebugStringLibTest, NestedMessages) { TestAllTypes proto; proto.Clear(); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message(); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); 
proto.mutable_optional_foreign_message(); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message(); proto.mutable_optional_foreign_message(); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message()->set_optional_int32(1); proto.mutable_optional_foreign_message()->set_c(-1234); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message()->set_optional_int32(1234); proto.mutable_optional_nested_message() ->mutable_msg(); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message()->set_optional_int32(1234); proto.mutable_optional_nested_message()->mutable_msg()->set_optional_string( "abc"); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.mutable_optional_nested_message()->mutable_msg()->set_optional_string( "abc"); proto.mutable_optional_nested_message()->set_optional_int64(1234); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); auto* nested = proto.add_repeated_nested_message(); nested = proto.add_repeated_nested_message(); nested->set_optional_int32(123); nested->mutable_msg(); nested = proto.add_repeated_nested_message(); nested->mutable_msg(); nested->mutable_msg()->set_optional_string("abc"); nested->set_optional_int64(1234); EXPECT_TEXT_TRANSFORMS_MATCH(); EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }", "optional_nested_message: < optional_int32: 123 >"); EXPECT_PARSE_FAILURE("optional_nested_message: < optional_int32: 123 }"); EXPECT_PARSE_FAILURE("optional_nested_message: { optional_int32: 123 >"); EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }", "optional_nested_message < optional_int32: 123 >"); EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }", "optional_nested_message{ optional_int32: 123 } "); EXPECT_PARSE_SUCCESS( ("repeated_nested_message { } " "repeated_nested_message { optional_int32: 123 }"), "repeated_nested_message: [ { }, { optional_int32: 123 } ]"); 
EXPECT_PARSE_SUCCESS( ("repeated_nested_message { } " "repeated_nested_message { optional_int32: 123 }"), "repeated_nested_message [ { }, { optional_int32: 123 } ]"); EXPECT_PARSE_SUCCESS( ("repeated_nested_message { } " "repeated_nested_message { optional_int32: 123 } " "repeated_nested_message { optional_int32: 456 }"), ("repeated_nested_message [ { }, { optional_int32: 123 } ]" "repeated_nested_message [ { optional_int32: 456 } ]")); EXPECT_PARSE_FAILURE("optional_nested_message: {optional_int32: 'abc' }"); EXPECT_PARSE_FAILURE( ("optional_nested_message { optional_int32: 123 } " "optional_nested_message { optional_int64: 456 }")); } TEST(CreateProtoDebugStringLibTest, RecursiveMessage) { NestedTestAllTypes proto; NestedTestAllTypes* cur = &proto; for (int depth = 0; depth < 20; ++depth) { cur->mutable_payload()->set_optional_int32(1000 + depth); cur = cur->mutable_child(); } EXPECT_TEXT_TRANSFORMS_MATCH(); } template <typename T> T ParseProto(const string& value_text_proto) { T value; EXPECT_TRUE(protobuf::TextFormat::ParseFromString(value_text_proto, &value)) << value_text_proto; return value; } TestAllTypes::NestedMessage ParseNestedMessage(const string& value_text_proto) { return ParseProto<TestAllTypes::NestedMessage>(value_text_proto); } TEST(CreateProtoDebugStringLibTest, Map) { TestAllTypes proto; std::vector<TestAllTypes::NestedMessage> msg_values; msg_values.push_back(ParseNestedMessage("optional_int32: 345")); msg_values.push_back(ParseNestedMessage("optional_int32: 123")); msg_values.push_back(ParseNestedMessage("optional_int32: 234")); msg_values.push_back(ParseNestedMessage("optional_int32: 0")); proto.Clear(); { auto& map = *proto.mutable_map_string_to_message(); map["def"] = msg_values[0]; map["abc"] = msg_values[1]; map["cde"] = msg_values[2]; map[""] = msg_values[3]; } EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); { auto& map = *proto.mutable_map_int32_to_message(); map[20] = msg_values[0]; map[10] = msg_values[1]; map[15] = msg_values[2]; 
map[0] = msg_values[3]; } EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); { auto& map = *proto.mutable_map_int64_to_message(); map[20] = msg_values[0]; map[10] = msg_values[1]; map[15] = msg_values[2]; map[0] = msg_values[3]; } EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); { auto& map = *proto.mutable_map_int64_to_message(); map[true] = msg_values[0]; map[false] = msg_values[1]; } EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); { auto& map = *proto.mutable_map_string_to_int64(); map["def"] = 0; map["abc"] = std::numeric_limits<protobuf_int64>::max(); map[""] = 20; } EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); { auto& map = *proto.mutable_map_int64_to_string(); map[0] = "def"; map[std::numeric_limits<protobuf_int64>::max()] = ""; map[20] = "abc"; } EXPECT_TEXT_TRANSFORMS_MATCH(); EXPECT_PARSE_SUCCESS(("map_string_to_int64 { key: \"abc\" value: 5 } " "map_string_to_int64 { key: \"def\" value: 2 } " "map_string_to_int64 { key: \"ghi\" value: 4 }"), ("map_string_to_int64: { key: 'abc' value: 1 } " "map_string_to_int64: { key: 'def' value: 2 } " "map_string_to_int64: { key: 'ghi' value: 3 } " "map_string_to_int64: { key: 'ghi' value: 4 } " "map_string_to_int64: { key: 'abc' value: 5 } ")); } TEST(CreateProtoDebugStringLibTest, Enums) { TestAllTypes proto; proto.Clear(); proto.set_optional_nested_enum(TestAllTypes::ZERO); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_optional_nested_enum(TestAllTypes::FOO); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.add_repeated_nested_enum(TestAllTypes::FOO); proto.add_repeated_nested_enum(TestAllTypes::ZERO); proto.add_repeated_nested_enum(TestAllTypes::BAR); proto.add_repeated_nested_enum(TestAllTypes::NEG); proto.add_repeated_nested_enum(TestAllTypes::ZERO); proto.set_optional_foreign_enum(ForeignEnum::FOREIGN_BAR); EXPECT_TEXT_TRANSFORMS_MATCH(); EXPECT_PARSE_SUCCESS( "optional_nested_enum: BAR " "repeated_nested_enum: BAR " "repeated_nested_enum: ZERO " "repeated_nested_enum: FOO", ("repeated_nested_enum: 2 " 
"repeated_nested_enum: 0 " "optional_nested_enum: 2 " "repeated_nested_enum: 1")); EXPECT_PARSE_SUCCESS("", "optional_nested_enum: -0"); EXPECT_PARSE_FAILURE("optional_nested_enum: 2147483648"); EXPECT_PARSE_FAILURE("optional_nested_enum: BARNONE"); EXPECT_PARSE_FAILURE("optional_nested_enum: 'BAR'"); EXPECT_PARSE_FAILURE("optional_nested_enum: \"BAR\" "); EXPECT_EQ(string("BAR"), string(EnumName_TestAllTypes_NestedEnum(TestAllTypes::BAR))); EXPECT_EQ(string(""), string(EnumName_TestAllTypes_NestedEnum( static_cast<TestAllTypes_NestedEnum>(123)))); } TEST(CreateProtoDebugStringLibTest, Oneof) { TestAllTypes proto; proto.Clear(); proto.set_oneof_string("abc"); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_oneof_string(""); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_oneof_string("abc"); proto.set_oneof_uint32(123); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_oneof_uint32(0); EXPECT_TEXT_TRANSFORMS_MATCH(); proto.Clear(); proto.set_oneof_enum(TestAllTypes::ZERO); EXPECT_TEXT_TRANSFORMS_MATCH(); EXPECT_PARSE_FAILURE("oneof_string: \"abc\" oneof_uint32: 13 "); EXPECT_PARSE_FAILURE("oneof_string: \"abc\" oneof_string: \"def\" "); } TEST(CreateProtoDebugStringLibTest, Comments) { TestAllTypes proto; EXPECT_PARSE_SUCCESS("optional_int64: 123 optional_string: \"#text\"", ("#leading comment \n" "optional_int64# comment\n" ":# comment\n" "123# comment\n" "optional_string # comment\n" ": # comment\n" "\"#text\"#comment####\n")); EXPECT_PARSE_FAILURE("optional_int64: EXPECT_PARSE_FAILURE("optional_int64:\n123"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9d6eea50-7e9c-414d-8036-f4cbf5f26b7e
cpp
tensorflow/tensorflow
inline_partitionedcall
tensorflow/tools/graph_transforms/inline_partitionedcall.cc
tensorflow/tools/graph_transforms/inline_partitionedcall_test.cc
#include <string> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { constexpr char kPartitionedCallOpName[] = "PartitionedCall"; constexpr char kFunctionAttrName[] = "f"; namespace { absl::optional<FunctionDef> GetFunctionByNameFromLibrary( const GraphDef& graph, absl::string_view function_name) { for (const auto& fct : graph.library().function()) { if (fct.signature().name() == function_name) { return fct; } } return {}; } std::string NormalizeNodeDefInput(const std::string& input_name) { std::vector<std::string> name_parts = absl::StrSplit(input_name, absl::ByChar(':')); if (name_parts.size() > 2) { return absl::StrCat(name_parts[0], ":", name_parts.back()); } return input_name; } } Status InlinePartitionedCall(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { output_graph_def->Clear(); absl::flat_hash_map<std::string, std::string> remap_input; for (const NodeDef& node : input_graph_def.node()) { if (node.op() == kPartitionedCallOpName) { if (node.attr().count(kFunctionAttrName) == 0) { return Status( absl::StatusCode::kNotFound, "Node " + node.name() + " has no attribute: " + kFunctionAttrName); } if (!node.attr().at(kFunctionAttrName).has_func()) { return Status(absl::StatusCode::kNotFound, "Cannot figure out function name"); } const std::string function_name = node.attr().at(kFunctionAttrName).func().name(); absl::optional<FunctionDef> function = GetFunctionByNameFromLibrary(input_graph_def, function_name); if (!function.has_value()) { return 
Status(absl::StatusCode::kNotFound, "function " + function_name + " Not found"); } const std::string prefix = node.name(); const int kOutputArgumentCount = function->signature().output_arg().size(); for (int k = 0; k < kOutputArgumentCount; ++k) { const std::string function_arg_output_name = function->ret().at(function->signature().output_arg()[k].name()); remap_input.insert_or_assign( CanonicalInputName(absl::StrCat(node.name(), ":", k)), absl::StrCat(prefix, "/", NormalizeNodeDefInput(function_arg_output_name))); } const int kInputArgumentCount = function->signature().input_arg().size(); if (node.input().size() != kInputArgumentCount) { return Status(absl::StatusCode::kInvalidArgument, "Called function " + function_name + " has invalid input signature."); } absl::flat_hash_map<std::string, std::string> input_argument_map; for (int k = 0; k < kInputArgumentCount; ++k) { const std::string canonical_name = CanonicalInputName(function->signature().input_arg()[k].name()); input_argument_map.insert_or_assign(canonical_name, node.input()[k]); } for (const NodeDef& function_node : function->node_def()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = function_node; new_node->set_name(absl::StrCat(prefix, "/", function_node.name())); absl::c_transform( *new_node->mutable_input(), new_node->mutable_input()->begin(), [prefix, input_argument_map](const std::string& input_name) { const std::string canonical_input_name = CanonicalInputName(input_name); if (input_argument_map.find(canonical_input_name) != input_argument_map.end()) { return input_argument_map.at(canonical_input_name); } return absl::StrCat(prefix, "/", NormalizeNodeDefInput(input_name)); }); } } else { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; } } for (NodeDef& node : *output_graph_def->mutable_node()) { absl::c_transform( *node.mutable_input(), node.mutable_input()->begin(), [remap_input](const std::string& input_name) { const std::string 
canonical_input_name = CanonicalInputName(input_name); if (remap_input.find(canonical_input_name) != remap_input.end()) { return remap_input.at(canonical_input_name); } return input_name; }); } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("inline_partitionedcall", InlinePartitionedCall); } }
#include <algorithm> #include <string> #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { constexpr char kGraphDefWithPartitionedCall[] = "node {\n" " name: \"y\"\n" " op: \"Placeholder\"\n" "}\n" "node {\n" " name: \"sub/y\"\n" " op: \"Const\"\n" "}\n" "node {\n" " name: \"PartitionedCall\"\n" " op: \"PartitionedCall\"\n" " input: \"y\"\n" " input: \"sub/y\"\n" " attr {\n" " key: \"f\"\n" " value {\n" " func {\n" " name: \"__inference_simple_add_14\"\n" " }\n" " }\n" " }\n" "}\n" "node {\n" " name: \"add/y\"\n" " op: \"Const\"\n" "}\n" "node {\n" " name: \"add\"\n" " op: \"AddV2\"\n" " input: \"PartitionedCall\"\n" " input: \"add/y\"\n" "}\n" "node {\n" " name: \"Identity\"\n" " op: \"Identity\"\n" " input: \"add\"\n" "}\n" "library {\n" " function {\n" " signature {\n" " name: \"__inference_simple_add_14\"\n" " input_arg {\n" " name: \"x\"\n" " type: DT_FLOAT\n" " }\n" " input_arg {\n" " name: \"y\"\n" " type: DT_FLOAT\n" " }\n" " output_arg {\n" " name: \"identity\"\n" " type: DT_FLOAT\n" " }\n" " }\n" " node_def {\n" " name: \"mul\"\n" " op: \"Mul\"\n" " input: \"x\"\n" " input: \"y\"\n" " }\n" " node_def {\n" " name: \"add/y\"\n" " op: \"Const\"\n" " }\n" " node_def {\n" " name: \"add\"\n" " op: \"AddV2\"\n" " input: \"mul:z:0\"\n" " input: \"add/y:output:0\"\n" " }\n" " node_def {\n" " name: \"Identity\"\n" " op: \"Identity\"\n" " input: \"add:z:0\"\n" " }\n" " ret {\n" " key: \"identity\"\n" " value: \"Identity:output:0\"\n" " }\n" " }\n" "}\n"; Status InlinePartitionedCall(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); TEST(InlinePartitionedCallTest, Inlining) { GraphDef in_graph; 
EXPECT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString( kGraphDefWithPartitionedCall, &in_graph)); GraphDef result; TransformFuncContext context; context.input_names = {"y"}; context.output_names = {"Identity"}; TF_ASSERT_OK(InlinePartitionedCall(in_graph, context, &result)); EXPECT_TRUE(std::none_of( result.node().cbegin(), result.node().cend(), [](const NodeDef& node) { return node.op() == "PartitionedCall"; })); EXPECT_EQ(9, result.node().size()); TF_EXPECT_OK(IsGraphValid(result)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/inline_partitionedcall.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/inline_partitionedcall_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
dee11806-ee1a-48a5-8587-2f45b172a9ae
cpp
tensorflow/tensorflow
fuse_convolutions
tensorflow/tools/graph_transforms/fuse_convolutions.cc
tensorflow/tools/graph_transforms/fuse_convolutions_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FuseResizePadAndConv(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Conv2D", { {"MirrorPad", { {"ResizeBilinear"}, {"*"} } }, {"*"} } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& conv_node = match.node; const NodeDef& mirror_pad_node = match.inputs[0].node; const NodeDef& weights_node = match.inputs[1].node; const NodeDef& resize_node = match.inputs[0].inputs[0].node; const NodeDef& pad_dims_node = match.inputs[0].inputs[1].node; new_nodes->push_back(weights_node); new_nodes->push_back(pad_dims_node); NodeDef fused_conv; fused_conv.set_op("FusedResizeAndPadConv2D"); fused_conv.set_name(match.node.name()); AddNodeInput(resize_node.input(0), &fused_conv); AddNodeInput(resize_node.input(1), &fused_conv); AddNodeInput(mirror_pad_node.input(1), &fused_conv); AddNodeInput(conv_node.input(1), &fused_conv); CopyNodeAttr(resize_node, "align_corners", "resize_align_corners", &fused_conv); CopyNodeAttr(mirror_pad_node, "mode", "mode", &fused_conv); CopyNodeAttr(conv_node, "T", "T", &fused_conv); CopyNodeAttr(conv_node, "padding", "padding", &fused_conv); CopyNodeAttr(conv_node, "strides", "strides", &fused_conv); new_nodes->push_back(fused_conv); return OkStatus(); }, {}, &replaced_graph_def)); *output_graph_def = replaced_graph_def; return 
OkStatus(); } Status FuseResizeAndConv(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Conv2D", { {"ResizeBilinear"}, {"*"} } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& conv_node = match.node; const NodeDef& resize_node = match.inputs[0].node; const NodeDef& weights_node = match.inputs[1].node; new_nodes->push_back(weights_node); NodeDef pad_dims_node; pad_dims_node.set_op("Const"); pad_dims_node.set_name(conv_node.name() + "_dummy_paddings"); SetNodeAttr("dtype", DT_INT32, &pad_dims_node); SetNodeTensorAttr<int32>("value", {4, 2}, {0, 0, 0, 0, 0, 0, 0, 0}, &pad_dims_node); new_nodes->push_back(pad_dims_node); NodeDef fused_conv; fused_conv.set_op("FusedResizeAndPadConv2D"); fused_conv.set_name(match.node.name()); AddNodeInput(resize_node.input(0), &fused_conv); AddNodeInput(resize_node.input(1), &fused_conv); AddNodeInput(pad_dims_node.name(), &fused_conv); AddNodeInput(conv_node.input(1), &fused_conv); CopyNodeAttr(resize_node, "align_corners", "resize_align_corners", &fused_conv); SetNodeAttr("mode", "REFLECT", &fused_conv); CopyNodeAttr(conv_node, "T", "T", &fused_conv); CopyNodeAttr(conv_node, "padding", "padding", &fused_conv); CopyNodeAttr(conv_node, "strides", "strides", &fused_conv); new_nodes->push_back(fused_conv); return OkStatus(); }, {}, &replaced_graph_def)); *output_graph_def = replaced_graph_def; return OkStatus(); } Status FusePadAndConv(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Conv2D", { {"MirrorPad", { {"*"}, {"*"}, } }, {"*"} } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, 
std::vector<NodeDef>* new_nodes) { const NodeDef& conv_node = match.node; CHECK_EQ("Conv2D", conv_node.op()); const NodeDef& mirror_pad_node = match.inputs[0].node; CHECK_EQ("MirrorPad", mirror_pad_node.op()); const NodeDef& weights_node = match.inputs[1].node; const NodeDef& input_node = match.inputs[0].inputs[0].node; const NodeDef& pad_dims_node = match.inputs[0].inputs[1].node; new_nodes->push_back(weights_node); new_nodes->push_back(input_node); new_nodes->push_back(pad_dims_node); NodeDef fused_conv; fused_conv.set_op("FusedPadConv2D"); fused_conv.set_name(match.node.name()); AddNodeInput(mirror_pad_node.input(0), &fused_conv); AddNodeInput(mirror_pad_node.input(1), &fused_conv); AddNodeInput(conv_node.input(1), &fused_conv); CopyNodeAttr(mirror_pad_node, "mode", "mode", &fused_conv); CopyNodeAttr(conv_node, "T", "T", &fused_conv); CopyNodeAttr(conv_node, "padding", "padding", &fused_conv); CopyNodeAttr(conv_node, "strides", "strides", &fused_conv); new_nodes->push_back(fused_conv); return OkStatus(); }, {}, &replaced_graph_def)); *output_graph_def = replaced_graph_def; return OkStatus(); } REGISTER_GRAPH_TRANSFORM("fuse_resize_pad_and_conv", FuseResizePadAndConv); REGISTER_GRAPH_TRANSFORM("fuse_resize_and_conv", FuseResizeAndConv); REGISTER_GRAPH_TRANSFORM("fuse_pad_and_conv", FusePadAndConv); } }
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"

namespace tensorflow {
namespace graph_transforms {

// Declared here so the test can call the transforms defined in
// fuse_convolutions.cc without a dedicated header.
Status FuseResizePadAndConv(const GraphDef& input_graph_def,
                            const TransformFuncContext& context,
                            GraphDef* output_graph_def);
Status FuseResizeAndConv(const GraphDef& input_graph_def,
                         const TransformFuncContext& context,
                         GraphDef* output_graph_def);
Status FusePadAndConv(const GraphDef& input_graph_def,
                      const TransformFuncContext& context,
                      GraphDef* output_graph_def);

// Each test builds a small float graph, runs it, applies the transform,
// runs the transformed graph, and checks that (a) the numerical outputs
// match and (b) the original un-fused ops no longer appear.
class FuseConvolutionsTest : public ::testing::Test {
 protected:
  // Conv2D(MirrorPad(ResizeBilinear(input))) -> FusedResizeAndPadConv2D.
  void TestFuseResizePadAndConv() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)

    // 1x2x3x2 input tensor with easily-checked values.
    Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
    test::FillValues<float>(
        &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
                      -5.0f, -3.0f, -6.0f});
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_data));

    Output resize_op = ResizeBilinear(root.WithOpName("resize_op"), input_op,
                                      Const(root.WithOpName("size"), {12, 4}),
                                      ResizeBilinear::AlignCorners(false));

    // Reflective padding: 1 row top/bottom, 2 columns left/right.
    Tensor pad_dims_data(DT_INT32, TensorShape({4, 2}));
    test::FillValues<int32>(&pad_dims_data, {0, 0, 1, 1, 2, 2, 0, 0});
    Output pad_dims_op = Const(root.WithOpName("pad_dims_op"),
                               Input::Initializer(pad_dims_data));
    Output pad_op = MirrorPad(root.WithOpName("pad_op"), resize_op,
                              pad_dims_op, "REFLECT");

    Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
    test::FillValues<float>(&weights_data,
                            {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
    Output weights_op =
        Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));

    Output conv_op = Conv2D(root.WithOpName("output"), pad_op, weights_op,
                            {1, 1, 1, 1}, "VALID");

    GraphDef original_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));

    // Baseline run of the un-fused graph.
    std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(original_session->Create(original_graph_def));
    std::vector<Tensor> original_outputs;
    TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));

    GraphDef fused_graph_def;
    TF_ASSERT_OK(FuseResizePadAndConv(original_graph_def, {{}, {"output"}},
                                      &fused_graph_def));

    std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(fused_session->Create(fused_graph_def));
    std::vector<Tensor> fused_outputs;
    TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));

    test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);

    // All three fused-away ops should be gone from the graph.
    for (const NodeDef& node : fused_graph_def.node()) {
      EXPECT_NE("Conv2D", node.op());
      EXPECT_NE("MirrorPad", node.op());
      EXPECT_NE("ResizeBilinear", node.op());
    }
  }

  // Conv2D(ResizeBilinear(input)) -> FusedResizeAndPadConv2D (zero paddings).
  void TestFuseResizeAndConv() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)

    Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
    test::FillValues<float>(
        &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
                      -5.0f, -3.0f, -6.0f});
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_data));

    Output resize_op = ResizeBilinear(root.WithOpName("resize_op"), input_op,
                                      Const(root.WithOpName("size"), {12, 4}),
                                      ResizeBilinear::AlignCorners(false));

    Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
    test::FillValues<float>(&weights_data,
                            {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
    Output weights_op =
        Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));

    Output conv_op = Conv2D(root.WithOpName("output"), resize_op, weights_op,
                            {1, 1, 1, 1}, "VALID");

    GraphDef original_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));

    std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(original_session->Create(original_graph_def));
    std::vector<Tensor> original_outputs;
    TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));

    GraphDef fused_graph_def;
    TF_ASSERT_OK(FuseResizeAndConv(original_graph_def, {{}, {"output"}},
                                   &fused_graph_def));

    std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(fused_session->Create(fused_graph_def));
    std::vector<Tensor> fused_outputs;
    TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));

    test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);

    for (const NodeDef& node : fused_graph_def.node()) {
      EXPECT_NE("Conv2D", node.op());
      EXPECT_NE("ResizeBilinear", node.op());
    }
  }

  // Conv2D(MirrorPad(input)) -> FusedPadConv2D.
  void TestFusePadAndConv() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)

    Tensor input_data(DT_FLOAT, TensorShape({1, 2, 3, 2}));
    test::FillValues<float>(
        &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
                      -5.0f, -3.0f, -6.0f});
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_data));

    Tensor pad_dims_data(DT_INT32, TensorShape({4, 2}));
    test::FillValues<int32>(&pad_dims_data, {0, 0, 1, 1, 2, 2, 0, 0});
    Output pad_dims_op = Const(root.WithOpName("pad_dims_op"),
                               Input::Initializer(pad_dims_data));
    Output pad_op = MirrorPad(root.WithOpName("pad_op"), input_op, pad_dims_op,
                              "REFLECT");

    Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
    test::FillValues<float>(&weights_data,
                            {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
    Output weights_op =
        Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));

    Output conv_op = Conv2D(root.WithOpName("output"), pad_op, weights_op,
                            {1, 1, 1, 1}, "VALID");

    GraphDef original_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));

    std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(original_session->Create(original_graph_def));
    std::vector<Tensor> original_outputs;
    TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));

    GraphDef fused_graph_def;
    TF_ASSERT_OK(
        FusePadAndConv(original_graph_def, {{}, {"output"}}, &fused_graph_def));

    std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(fused_session->Create(fused_graph_def));
    std::vector<Tensor> fused_outputs;
    TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));

    test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);

    for (const NodeDef& node : fused_graph_def.node()) {
      EXPECT_NE("Conv2D", node.op());
      EXPECT_NE("MirrorPad", node.op());
    }
  }
};

TEST_F(FuseConvolutionsTest, TestFuseResizePadAndConv) {
  TestFuseResizePadAndConv();
}

TEST_F(FuseConvolutionsTest, TestFuseResizeAndConv) { TestFuseResizeAndConv(); }

TEST_F(FuseConvolutionsTest, TestFusePadAndConv) { TestFusePadAndConv(); }

}  // namespace graph_transforms
}  // namespace tensorflow
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fuse_convolutions.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fuse_convolutions_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
376cd3aa-a22b-41e5-b02a-66981383442d
cpp
tensorflow/tensorflow
quantize_nodes
tensorflow/tools/graph_transforms/quantize_nodes.cc
tensorflow/tools/graph_transforms/quantize_nodes_test.cc
#define EIGEN_USE_THREADS

#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"

namespace tensorflow {
namespace graph_transforms {

// Describes how one float op maps onto its quantized equivalent.
struct QuantizedOpInfo {
  // Name of the float op to replace (e.g. "Conv2D").
  string float_name;
  // Attributes copied verbatim from the float node to the quantized node.
  std::vector<string> attrs_to_copy;
  // Dtype attributes set explicitly on the quantized node.
  std::vector<std::pair<string, DataType>> dtypes_to_set;
  // Bit depth expected on quantized inputs / produced on outputs.
  DataType input_bit_depth;
  DataType output_bit_depth;
  // Input indices left as float (e.g. shape/size tensors).
  std::set<int32> unquantized_inputs;
  // Whether min/max inputs are interleaved per input (CONTIGUOUS) or all
  // mins followed by all maxes (SEPARATE).
  enum { CONTIGUOUS_MIN_MAX, SEPARATE_MIN_MAX } min_max_order;
};

// Table of all float ops that have eight-bit equivalents.
const std::vector<QuantizedOpInfo>& GetQuantizedOpList() {
  static const std::vector<QuantizedOpInfo> op_list = {
      {"Add",
       {},
       {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
       DT_QUINT8,
       DT_QINT32,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"AvgPool",
       {"ksize", "strides", "padding"},
       {{"T", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"BiasAdd",
       {},
       {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"out_type", DT_QINT32}},
       DT_QUINT8,
       DT_QINT32,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"Concat",
       {"N"},
       {{"T", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {0},  // input 0 is the concat axis, which stays unquantized.
       QuantizedOpInfo::SEPARATE_MIN_MAX},
      {"Conv2D",
       {"strides", "padding"},
       {{"Tinput", DT_QUINT8}, {"Tfilter", DT_QUINT8}, {"out_type", DT_QINT32}},
       DT_QUINT8,
       DT_QINT32,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"MatMul",
       {"transpose_a", "transpose_b"},
       {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
       DT_QUINT8,
       DT_QINT32,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"MaxPool",
       {"ksize", "strides", "padding"},
       {{"T", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"Mul",
       {},
       {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
       DT_QUINT8,
       DT_QINT32,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"Relu",
       {},
       {{"Tinput", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"ResizeBilinear",
       {"align_corners"},
       {{"T", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {1},  // input 1 is the target size, which stays unquantized.
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"Relu6",
       {},
       {{"Tinput", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {},
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
      {"Reshape",
       {},
       {{"T", DT_QUINT8}},
       DT_QUINT8,
       DT_QUINT8,
       {1},  // input 1 is the new shape, which stays unquantized.
       QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
  };
  return op_list;
}

namespace {

// Turns an input reference ("^node", "node:2") into a string usable as part
// of a node name ("__hat__node", "node__port__2").
string UniqueNodeNameFromInput(const string& input_name) {
  string prefix;
  string node_name;
  string suffix;
  NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
  string result;
  if (prefix == "^") {
    result += "__hat__";
  }
  result += node_name;
  if (!suffix.empty()) {
    result += "__port__" + suffix.substr(1, suffix.size() - 1);
  }
  return result;
}

// Reads an optional (min, max) float pair from the transform parameters.
// Sets *has_range to false when neither is given; it is an error to pass
// only one of the two.
Status ExtractRangeFromParams(const TransformFuncContext& context,
                              const string& min_name, const string& max_name,
                              float* min_value, float* max_value,
                              bool* has_range) {
  const bool has_min = (context.params.count(min_name) != 0);
  const bool has_max = (context.params.count(max_name) != 0);
  *has_range = (has_min || has_max);
  if (!*has_range) {
    return OkStatus();
  }
  if (!has_min || !has_max) {
    return errors::InvalidArgument("You must pass both ", min_name, " and ",
                                   max_name, " into quantize_nodes");
  }
  TF_RETURN_IF_ERROR(context.GetOneFloatParameter(min_name, 0.0f, min_value));
  TF_RETURN_IF_ERROR(context.GetOneFloatParameter(max_name, 0.0f, max_value));
  return OkStatus();
}

}  // namespace

// Collapses nodes that are identical apart from their name (hash of the
// nameless NodeDef) into a single node, repeating until a fixed point is
// reached.  Graph inputs/outputs and stateful ops are never merged.
Status MergeDuplicateNodes(const GraphDef& input_graph_def,
                           const TransformFuncContext& context,
                           GraphDef* output_graph_def) {
  std::set<string> input_names(context.input_names.begin(),
                               context.input_names.end());
  std::set<string> output_names(context.output_names.begin(),
                                context.output_names.end());
  GraphDef current_graph_def = input_graph_def;
  bool any_duplicates_found;
  do {
    any_duplicates_found = false;
    // Bucket nodes by the hash of their contents, ignoring the name except
    // for graph inputs/outputs (which must keep their identity).
    std::map<uint64, std::vector<const NodeDef*>> hashed_nodes;
    for (const NodeDef& node : current_graph_def.node()) {
      NodeDef nameless_node = node;
      if (!input_names.count(node.name()) && !output_names.count(node.name())) {
        nameless_node.set_name("");
      }
      const uint64 hash = HashNodeDef(nameless_node);
      hashed_nodes[hash].push_back(&node);
    }
    std::map<string, string> inputs_to_rename;
    GraphDef merged_graph_def;
    for (const std::pair<const uint64, std::vector<const NodeDef*>>&
             hashed_node_info : hashed_nodes) {
      const std::vector<const NodeDef*>& hash_node_list =
          hashed_node_info.second;
      for (int i = 0; i < hash_node_list.size(); ++i) {
        const NodeDef* current_node = hash_node_list[i];
        const OpDef* op_def = nullptr;
        TF_RETURN_IF_ERROR(
            OpRegistry::Global()->LookUpOpDef(current_node->op(), &op_def));
        // Stateful ops (e.g. variables) may not be deduplicated even when
        // their definitions hash identically.
        const bool is_duplicate = ((!op_def->is_stateful()) && (i > 0));
        if (is_duplicate) {
          const string original_name = hash_node_list[0]->name();
          inputs_to_rename[current_node->name() + ":*"] = original_name;
          any_duplicates_found = true;
        } else {
          NodeDef* new_node = merged_graph_def.mutable_node()->Add();
          *new_node = *current_node;
        }
      }
    }
    // Redirect consumers of removed duplicates to the surviving node.
    TF_RETURN_IF_ERROR(RenameNodeInputs(merged_graph_def, inputs_to_rename,
                                        std::unordered_set<string>(),
                                        &current_graph_def));
  } while (any_duplicates_found);
  *output_graph_def = current_graph_def;
  return OkStatus();
}

// Removes QuantizeV2(Dequantize(...)) round trips by wiring the quantize's
// consumers directly to the dequantize's quantized inputs.  The dequantize
// is kept only when something else (or a graph output) still needs it.
Status RemoveRedundantQuantizations(const GraphDef& input_graph_def,
                                    const TransformFuncContext& context,
                                    GraphDef* output_graph_def) {
  std::set<string> graph_outputs;
  for (const string& output_name : context.output_names) {
    graph_outputs.insert(NodeNameFromInput(output_name));
  }
  std::map<string, string> inputs_to_rename;
  GraphDef replaced_graph_def;
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def,  // clang-format off
      {"QuantizeV2",
        {
          {"Dequantize"},
          {"Min"},
          {"Max"},
        }
      },  // clang-format on
      [&inputs_to_rename, &graph_outputs](const NodeMatch& match,
                                          const std::set<string>& input_nodes,
                                          const std::set<string>& output_nodes,
                                          std::vector<NodeDef>* new_nodes) {
        const NodeDef& quantize_node = match.node;
        const NodeDef& dequantize_node = match.inputs[0].node;
        // Forward the quantized tensor and its min/max straight through.
        inputs_to_rename[quantize_node.name() + ":0"] =
            dequantize_node.input(0);
        inputs_to_rename[quantize_node.name() + ":1"] =
            dequantize_node.input(1);
        inputs_to_rename[quantize_node.name() + ":2"] =
            dequantize_node.input(2);
        // Keep the whole match if the dequantize's float output is needed.
        if (output_nodes.count(dequantize_node.name()) ||
            graph_outputs.count(dequantize_node.name())) {
          CopyOriginalMatch(match, new_nodes);
        }
        return OkStatus();
      },
      {true}, &replaced_graph_def));
  return RenameNodeInputs(replaced_graph_def, inputs_to_rename,
                          std::unordered_set<string>(), output_graph_def);
}

// When the caller supplies input_min/input_max, converts float Placeholders
// to quint8 and feeds their consumers through a Dequantize with that fixed
// range, so callers can supply already-quantized input data.
Status QuantizePlaceholders(const GraphDef& input_graph_def,
                            const TransformFuncContext& context,
                            GraphDef* output_graph_def) {
  float input_min;
  float input_max;
  bool has_input_range;
  TF_RETURN_IF_ERROR(ExtractRangeFromParams(context, "input_min", "input_max",
                                            &input_min, &input_max,
                                            &has_input_range));
  if (!has_input_range) {
    *output_graph_def = input_graph_def;
    return OkStatus();
  }
  std::map<string, string> inputs_to_rename_first_pass;
  std::map<string, string> inputs_to_rename_second_pass;
  GraphDef placeholder_graph_def;
  placeholder_graph_def.Clear();
  for (const NodeDef& node : input_graph_def.node()) {
    if (node.op() != "Placeholder") {
      *(placeholder_graph_def.mutable_node()->Add()) = node;
    } else {
      string namespace_prefix = node.name() + "_eightbit";

      // The placeholder itself now produces quint8 data.
      NodeDef quantized_placeholder;
      quantized_placeholder = node;
      SetNodeAttr("dtype", DT_QUINT8, &quantized_placeholder);
      *(placeholder_graph_def.mutable_node()->Add()) = quantized_placeholder;

      // Constants carrying the caller-supplied float range.
      NodeDef min_node;
      min_node.set_op("Const");
      min_node.set_name(namespace_prefix + "/min");
      SetNodeAttr("dtype", DT_FLOAT, &min_node);
      Tensor min_tensor(DT_FLOAT, {});
      min_tensor.flat<float>()(0) = input_min;
      SetNodeTensorAttr<float>("value", min_tensor, &min_node);
      *(placeholder_graph_def.mutable_node()->Add()) = min_node;

      NodeDef max_node;
      max_node.set_op("Const");
      max_node.set_name(namespace_prefix + "/max");
      SetNodeAttr("dtype", DT_FLOAT, &max_node);
      Tensor max_tensor(DT_FLOAT, {});
      max_tensor.flat<float>()(0) = input_max;
      SetNodeTensorAttr<float>("value", max_tensor, &max_node);
      *(placeholder_graph_def.mutable_node()->Add()) = max_node;

      // Two-pass rename trick: consumers of the placeholder are pointed at
      // the dequantize, while the dequantize itself must still read the
      // placeholder, so its input is written with a temporary suffix that
      // the second pass strips.
      const string rename_suffix = "__RENAMED_PLACEHOLDER__";

      NodeDef dequantize_node;
      dequantize_node.set_op("Dequantize");
      dequantize_node.set_name(namespace_prefix + "/dequantize");
      SetNodeAttr("T", DT_QUINT8, &dequantize_node);
      SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
      AddNodeInput(node.name() + rename_suffix, &dequantize_node);
      AddNodeInput(min_node.name(), &dequantize_node);
      AddNodeInput(max_node.name(), &dequantize_node);
      *(placeholder_graph_def.mutable_node()->Add()) = dequantize_node;

      inputs_to_rename_first_pass[node.name()] = dequantize_node.name();
      inputs_to_rename_second_pass[node.name() + rename_suffix] = node.name();
    }
  }
  GraphDef first_pass_graph_def;
  TF_RETURN_IF_ERROR(
      RenameNodeInputs(placeholder_graph_def, inputs_to_rename_first_pass,
                       std::unordered_set<string>(), &first_pass_graph_def));
  TF_RETURN_IF_ERROR(
      RenameNodeInputs(first_pass_graph_def, inputs_to_rename_second_pass,
                       std::unordered_set<string>(), output_graph_def));
  return OkStatus();
}

// Replaces FakeQuantWithMinMaxVars (with constant min/max) by a real
// QuantizeV2 -> Requantize -> Dequantize chain using that known range, so
// later passes can merge it with neighboring quantized ops.
Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,
                                     const TransformFuncContext& context,
                                     GraphDef* output_graph_def) {
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def,  // clang-format off
      {"FakeQuantWithMinMaxVars",
        {
          {"*"},
          {"Const"},
          {"Const"},
        }
      },  // clang-format on
      [](const NodeMatch& match, const std::set<string>& input_nodes,
         const std::set<string>& output_nodes,
         std::vector<NodeDef>* new_nodes) {
        const NodeDef& fake_quant_node = match.node;
        const NodeDef& original_op_node = match.inputs[0].node;
        const NodeDef& fake_quant_min_node = match.inputs[1].node;
        const NodeDef& fake_quant_max_node = match.inputs[2].node;

        string namespace_prefix = fake_quant_node.name() + "_eightbit";

        new_nodes->push_back(original_op_node);
        new_nodes->push_back(fake_quant_min_node);
        new_nodes->push_back(fake_quant_max_node);

        // Quantize to 32 bits first so the requantize can narrow to the
        // known (min, max) eight-bit range.
        NodeDef quantize_node;
        quantize_node.set_op("QuantizeV2");
        quantize_node.set_name(namespace_prefix + "/quantize");
        SetNodeAttr("T", DT_QINT32, &quantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
        AddNodeInput(fake_quant_node.input(0), &quantize_node);
        AddNodeInput(fake_quant_min_node.name(), &quantize_node);
        AddNodeInput(fake_quant_max_node.name(), &quantize_node);
        new_nodes->push_back(quantize_node);

        NodeDef requantize_node;
        requantize_node.set_op("Requantize");
        requantize_node.set_name(namespace_prefix + "/requantize");
        SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
        SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
        AddNodeInput(quantize_node.name() + ":0", &requantize_node);
        AddNodeInput(quantize_node.name() + ":1", &requantize_node);
        AddNodeInput(quantize_node.name() + ":2", &requantize_node);
        AddNodeInput(fake_quant_min_node.name(), &requantize_node);
        AddNodeInput(fake_quant_max_node.name(), &requantize_node);
        new_nodes->push_back(requantize_node);

        // The dequantize takes over the fake-quant node's name so existing
        // consumers are untouched.
        NodeDef dequantize_node;
        dequantize_node.set_op("Dequantize");
        dequantize_node.set_name(fake_quant_node.name());
        SetNodeAttr("T", DT_QUINT8, &dequantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":0", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":1", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":2", &dequantize_node);
        new_nodes->push_back(dequantize_node);

        return OkStatus();
      },
      {}, output_graph_def));
  return OkStatus();
}

// Collapses a Requantize fed (via QuantizeV2/Dequantize round trips) by an
// earlier Requantize+RequantizationRange into a single Requantize applied
// directly to the original op's 32-bit output.
Status MergeAdjacentRequantizes(const GraphDef& input_graph_def,
                                const TransformFuncContext& context,
                                GraphDef* output_graph_def) {
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def,  // clang-format off
      {"Requantize",
        {
          {"QuantizeV2",
            {
              {"Dequantize",
                {
                  {"Requantize",
                    {
                      {"*"},
                      {"*"},
                      {"*"},
                      {"RequantizationRange"},
                      {"RequantizationRange"},
                    }
                  },
                  {"Requantize"},
                  {"Requantize"},
                }
              },
              {"Const"},
              {"Const"},
            },
          },
          {"QuantizeV2"},
          {"QuantizeV2"},
          {"Const"},
          {"Const"},
        }
      },  // clang-format on
      [](const NodeMatch& match, const std::set<string>& input_nodes,
         const std::set<string>& output_nodes,
         std::vector<NodeDef>* new_nodes) {
        const NodeDef& fake_requantize_node = match.node;
        // The op whose qint32 outputs feed the whole chain.
        const NodeDef& original_op_node =
            match.inputs[0].inputs[0].inputs[0].inputs[0].node;
        const NodeDef& fake_requantize_min_node = match.inputs[3].node;
        const NodeDef& fake_requantize_max_node = match.inputs[4].node;

        new_nodes->push_back(original_op_node);
        new_nodes->push_back(fake_requantize_min_node);
        new_nodes->push_back(fake_requantize_max_node);

        // One Requantize wired directly to the original op, keeping the
        // outer node's name and attrs.
        NodeDef requantize_node;
        requantize_node = fake_requantize_node;
        requantize_node.mutable_input()->Clear();
        AddNodeInput(original_op_node.name() + ":0", &requantize_node);
        AddNodeInput(original_op_node.name() + ":1", &requantize_node);
        AddNodeInput(original_op_node.name() + ":2", &requantize_node);
        AddNodeInput(fake_requantize_min_node.name(), &requantize_node);
        AddNodeInput(fake_requantize_max_node.name(), &requantize_node);
        new_nodes->push_back(requantize_node);

        return OkStatus();
      },
      {}, output_graph_def));
  return OkStatus();
}

// Moves a FakeQuantWithMinMaxVars up through a chain of (up to max_depth)
// single-input linear ops so it sits next to the op whose output range it
// describes, which lets later passes use the known range.
Status HoistFakeQuants(const GraphDef& input_graph_def,
                       const TransformFuncContext& context,
                       GraphDef* output_graph_def) {
  GraphDef current_graph_def = input_graph_def;
  const int max_depth = 3;
  // Try the deepest chains first so long chains are fully hoisted.
  for (int depth = max_depth; depth > 0; --depth) {
    OpTypePattern pattern = {"*"};
    for (int i = 0; i < depth; ++i) {
      pattern = {"*", {pattern}};
    }
    pattern = {"FakeQuantWithMinMaxVars", {pattern, {"Const"}, {"Const"}}};
    GraphDef hoisted_graph_def;
    TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
        current_graph_def, pattern,
        [depth](const NodeMatch& match, const std::set<string>& input_nodes,
                const std::set<string>& output_nodes,
                std::vector<NodeDef>* new_nodes) {
          const NodeDef& fake_quant_node = match.node;
          const NodeDef& fake_quant_min_node = match.inputs[1].node;
          const NodeDef& fake_quant_max_node = match.inputs[2].node;
          // Flatten the linear chain below the fake quant.
          std::vector<NodeDef> linear_nodes;
          NodeMatch current_match = match;
          for (int i = 0; i <= depth; ++i) {
            linear_nodes.push_back(current_match.inputs[0].node);
            current_match = current_match.inputs[0];
          }
          // Re-create the fake quant just above the deepest op's input.
          NodeDef new_fake_quant_node;
          new_fake_quant_node = fake_quant_node;
          new_fake_quant_node.set_name(fake_quant_node.name() + "_hoisted");
          new_fake_quant_node.set_input(
              0, linear_nodes[linear_nodes.size() - 2].input(0));
          new_nodes->push_back(new_fake_quant_node);

          new_nodes->push_back(fake_quant_min_node);
          new_nodes->push_back(fake_quant_max_node);

          // Splice the hoisted fake quant into the chain, and give the top
          // of the chain the old fake quant's name so consumers still work.
          linear_nodes[linear_nodes.size() - 2].set_input(
              0, new_fake_quant_node.name());
          linear_nodes.front().set_name(fake_quant_node.name());
          for (const NodeDef& linear_node : linear_nodes) {
            new_nodes->push_back(linear_node);
          }

          return OkStatus();
        },
        {}, &hoisted_graph_def));
    current_graph_def = hoisted_graph_def;
  }
  *output_graph_def = current_graph_def;
  return OkStatus();
}

// Main transform: replaces every supported float op with its eight-bit
// equivalent, bracketed by QuantizeV2/Dequantize conversions, then cleans
// up redundant conversions and duplicate nodes.
Status QuantizeNodes(const GraphDef& input_graph_def,
                     const TransformFuncContext& context,
                     GraphDef* output_graph_def) {
  // Ops the caller asked to leave in float form.
  std::set<string> ops_to_ignore;
  if (context.params.count("ignore_op") > 0) {
    for (const string& name : context.params.at("ignore_op")) {
      ops_to_ignore.insert(name);
    }
  }

  // Build an "A|B|C" pattern of quantizable op names, plus a lookup table.
  const std::vector<QuantizedOpInfo>& op_list = GetQuantizedOpList();
  string op_pattern;
  bool is_first = true;
  std::map<string, QuantizedOpInfo> op_map;
  for (const QuantizedOpInfo& op_info : op_list) {
    if (ops_to_ignore.count(op_info.float_name) == 0) {
      strings::StrAppend(&op_pattern, (is_first ? "" : "|"),
                         op_info.float_name);
      op_map.insert({op_info.float_name, op_info});
      is_first = false;
    }
  }

  // Preparation passes: quantize placeholders (if a range was given), hoist
  // fake quants, and turn fake quants into real requantize chains.
  GraphDef placeholder_graph_def;
  TF_RETURN_IF_ERROR(
      QuantizePlaceholders(input_graph_def, context, &placeholder_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(placeholder_graph_def));
  GraphDef hoisted_graph_def;
  TF_RETURN_IF_ERROR(
      HoistFakeQuants(placeholder_graph_def, context, &hoisted_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(hoisted_graph_def));
  GraphDef converted_graph_def;
  TF_RETURN_IF_ERROR(ConvertFakeQuantsToRequantize(hoisted_graph_def, context,
                                                   &converted_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(converted_graph_def));

  // Optional fixed output range replacing dynamic RequantizationRange.
  float fallback_min;
  float fallback_max;
  bool has_fallback_range;
  TF_RETURN_IF_ERROR(ExtractRangeFromParams(
      context, "fallback_min", "fallback_max", &fallback_min, &fallback_max,
      &has_fallback_range));

  GraphDef quantized_graph_def;
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      converted_graph_def, {op_pattern},
      [&op_map, fallback_min, fallback_max, has_fallback_range](
          const NodeMatch& match, const std::set<string>& input_nodes,
          const std::set<string>& output_nodes,
          std::vector<NodeDef>* new_nodes) {
        const NodeDef& float_node = match.node;
        const QuantizedOpInfo& op_info = op_map[float_node.op()];

        // Only quantize nodes whose relevant inputs and outputs are all
        // float; otherwise keep the original untouched.
        DataTypeVector input_types;
        DataTypeVector output_types;
        TF_RETURN_IF_ERROR(
            GetInOutTypes(float_node, &input_types, &output_types));
        bool are_all_float = true;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            continue;
          }
          if (i >= input_types.size()) {
            LOG(ERROR) << "input_types has incorrect size "
                       << input_types.size() << " <= " << i
                       << ". Assuming everything else is floats.";
          }
          if (i < input_types.size() && input_types[i] != DT_FLOAT) {
            are_all_float = false;
          }
        }
        for (const DataType& output_type : output_types) {
          if (output_type != DT_FLOAT) {
            are_all_float = false;
          }
        }
        if (!are_all_float) {
          CopyOriginalMatch(match, new_nodes);
          return OkStatus();
        }

        string namespace_prefix = float_node.name() + "_eightbit";

        // For every quantizable input, build a Reshape/Min/Max/QuantizeV2
        // chain that measures the input's range at runtime and quantizes it.
        std::vector<string> quantized_input_names;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            continue;
          }
          const string& input_name = float_node.input(i);
          string unique_input_name =
              namespace_prefix + "/" + UniqueNodeNameFromInput(input_name);

          // The control-dependency inputs ("^...") keep these constants in
          // the same execution frame as the node that produces the input.
          NodeDef reshape_dims;
          reshape_dims.set_op("Const");
          reshape_dims.set_name(unique_input_name + "/reshape_dims");
          AddNodeInput("^" + NodeNameFromInput(input_name), &reshape_dims);
          SetNodeAttr("dtype", DT_INT32, &reshape_dims);
          Tensor reshape_dims_tensor(DT_INT32, {1});
          reshape_dims_tensor.flat<int32>()(0) = -1;
          SetNodeTensorAttr<int32>("value", reshape_dims_tensor, &reshape_dims);
          new_nodes->push_back(reshape_dims);

          NodeDef reduction_dims;
          reduction_dims.set_op("Const");
          reduction_dims.set_name(unique_input_name + "/reduction_dims");
          AddNodeInput("^" + NodeNameFromInput(input_name), &reduction_dims);
          SetNodeAttr("dtype", DT_INT32, &reduction_dims);
          Tensor reduction_dims_tensor(DT_INT32, {1});
          reduction_dims_tensor.flat<int32>()(0) = 0;
          SetNodeTensorAttr<int32>("value", reduction_dims_tensor,
                                   &reduction_dims);
          new_nodes->push_back(reduction_dims);

          // Flatten to 1-D so a single reduction covers every element.
          NodeDef reshape_node;
          reshape_node.set_op("Reshape");
          reshape_node.set_name(unique_input_name + "/reshape");
          SetNodeAttr("T", DT_FLOAT, &reshape_node);
          AddNodeInput(input_name, &reshape_node);
          AddNodeInput(reshape_dims.name(), &reshape_node);
          new_nodes->push_back(reshape_node);

          NodeDef min_node;
          min_node.set_op("Min");
          min_node.set_name(unique_input_name + "/min");
          SetNodeAttr("T", DT_FLOAT, &min_node);
          SetNodeAttr("keep_dims", false, &min_node);
          AddNodeInput(reshape_node.name(), &min_node);
          AddNodeInput(reduction_dims.name(), &min_node);
          new_nodes->push_back(min_node);

          NodeDef max_node;
          max_node.set_op("Max");
          max_node.set_name(unique_input_name + "/max");
          SetNodeAttr("T", DT_FLOAT, &max_node);
          SetNodeAttr("keep_dims", false, &max_node);
          AddNodeInput(reshape_node.name(), &max_node);
          AddNodeInput(reduction_dims.name(), &max_node);
          new_nodes->push_back(max_node);

          NodeDef quantize_node;
          quantize_node.set_op("QuantizeV2");
          quantize_node.set_name(unique_input_name + "/quantize");
          SetNodeAttr("T", DT_QUINT8, &quantize_node);
          SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
          AddNodeInput(input_name, &quantize_node);
          AddNodeInput(min_node.name(), &quantize_node);
          AddNodeInput(max_node.name(), &quantize_node);
          new_nodes->push_back(quantize_node);
          quantized_input_names.push_back(quantize_node.name());
        }

        // The quantized replacement for the float op itself.
        NodeDef quantized_main_node;
        quantized_main_node.set_op("Quantized" + float_node.op());
        quantized_main_node.set_name(float_node.name() + "/eightbit");
        for (const string& attr_to_copy : op_info.attrs_to_copy) {
          CopyNodeAttr(float_node, attr_to_copy, attr_to_copy,
                       &quantized_main_node);
        }
        for (const std::pair<string, DataType>& dtype_to_set :
             op_info.dtypes_to_set) {
          SetNodeAttr(dtype_to_set.first, dtype_to_set.second,
                      &quantized_main_node);
        }
        int quantized_input_index = 0;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            AddNodeInput(float_node.input(i), &quantized_main_node);
          } else {
            const string& quantized_input_name =
                quantized_input_names[quantized_input_index];
            AddNodeInput(quantized_input_name + ":0", &quantized_main_node);
            ++quantized_input_index;
          }
        }
        // Append min/max inputs in whichever order the quantized op expects.
        if (op_info.min_max_order == QuantizedOpInfo::CONTIGUOUS_MIN_MAX) {
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
            AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
          }
        } else {
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
          }
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
          }
        }
        new_nodes->push_back(quantized_main_node);

        // Ops that produce 32-bit accumulations need to be narrowed back to
        // eight bits, either with a fixed fallback range or a dynamic
        // RequantizationRange.
        string eight_bit_node_name;
        if (op_info.output_bit_depth == DT_QINT32) {
          string requantize_min_input;
          string requantize_max_input;
          if (has_fallback_range) {
            NodeDef fallback_min_node;
            fallback_min_node.set_op("Const");
            fallback_min_node.set_name(quantized_main_node.name() +
                                       "/fallback_min");
            SetNodeAttr("dtype", DT_FLOAT, &fallback_min_node);
            Tensor fallback_min_tensor(DT_FLOAT, {});
            fallback_min_tensor.flat<float>()(0) = fallback_min;
            SetNodeTensorAttr<float>("value", fallback_min_tensor,
                                     &fallback_min_node);
            new_nodes->push_back(fallback_min_node);

            NodeDef fallback_max_node;
            fallback_max_node.set_op("Const");
            fallback_max_node.set_name(quantized_main_node.name() +
                                       "/fallback_max");
            SetNodeAttr("dtype", DT_FLOAT, &fallback_max_node);
            Tensor fallback_max_tensor(DT_FLOAT, {});
            fallback_max_tensor.flat<float>()(0) = fallback_max;
            SetNodeTensorAttr<float>("value", fallback_max_tensor,
                                     &fallback_max_node);
            new_nodes->push_back(fallback_max_node);

            requantize_min_input = fallback_min_node.name();
            requantize_max_input = fallback_max_node.name();
          } else {
            NodeDef requant_range_node;
            requant_range_node.set_op("RequantizationRange");
            requant_range_node.set_name(quantized_main_node.name() +
                                        "/requant_range");
            SetNodeAttr("Tinput", DT_QINT32, &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":0",
                         &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":1",
                         &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":2",
                         &requant_range_node);
            new_nodes->push_back(requant_range_node);

            requantize_min_input = requant_range_node.name() + ":0";
            requantize_max_input = requant_range_node.name() + ":1";
          }
          NodeDef requantize_node;
          requantize_node.set_op("Requantize");
          requantize_node.set_name(quantized_main_node.name() + "/requantize");
          SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
          SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":0", &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":1", &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":2", &requantize_node);
          AddNodeInput(requantize_min_input, &requantize_node);
          AddNodeInput(requantize_max_input, &requantize_node);
          new_nodes->push_back(requantize_node);
          eight_bit_node_name = requantize_node.name();
        } else {
          eight_bit_node_name = quantized_main_node.name();
        }

        // Final dequantize takes the float node's name so consumers that
        // expect float data keep working.
        NodeDef dequantize_node;
        dequantize_node.set_op("Dequantize");
        dequantize_node.set_name(float_node.name());
        SetNodeAttr("T", DT_QUINT8, &dequantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":0", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":1", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":2", &dequantize_node);
        new_nodes->push_back(dequantize_node);

        return OkStatus();
      },
      {}, &quantized_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(quantized_graph_def));

  // Cleanup passes: merge requantize chains, dedupe identical nodes, and
  // drop quantize/dequantize round trips created between adjacent ops.
  GraphDef merged_graph_def;
  TF_RETURN_IF_ERROR(MergeAdjacentRequantizes(quantized_graph_def, context,
                                              &merged_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(merged_graph_def));
  GraphDef deduped_graph_def;
  TF_RETURN_IF_ERROR(
      MergeDuplicateNodes(merged_graph_def, context, &deduped_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(deduped_graph_def));
  TF_RETURN_IF_ERROR(RemoveRedundantQuantizations(deduped_graph_def, context,
                                                  output_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(*output_graph_def));
  return OkStatus();
}

REGISTER_GRAPH_TRANSFORM("quantize_nodes", QuantizeNodes);

REGISTER_GRAPH_TRANSFORM("merge_duplicate_nodes", MergeDuplicateNodes);

}  // namespace graph_transforms
}  // namespace tensorflow
#define EIGEN_USE_THREADS #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status QuantizeNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status RemoveRedundantQuantizations(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status QuantizePlaceholders(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status MergeAdjacentRequantizes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status HoistFakeQuants(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status MergeDuplicateNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class QuantizeNodesTest : public ::testing::Test { protected: void TestTransformedVersusFloatGraph( const TransformFunc& transform_function, const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& float_inputs, const std::vector<std::pair<string, Tensor>>& transformed_inputs, const std::vector<string>& output_names, const TransformFuncContext& in_context, double threshold, GraphDef* transformed_graph_def) { 
std::unique_ptr<Session> float_session(NewSession(SessionOptions())); TF_ASSERT_OK(float_session->Create(float_graph_def)); std::vector<Tensor> float_outputs; TF_ASSERT_OK( float_session->Run(float_inputs, output_names, {}, &float_outputs)); TransformFuncContext context(in_context); std::vector<string> input_names; for (const std::pair<const string&, const Tensor&> float_input : float_inputs) { context.input_names.push_back(float_input.first); } context.output_names = output_names; TF_ASSERT_OK( transform_function(float_graph_def, context, transformed_graph_def)); std::unique_ptr<Session> transformed_session(NewSession(SessionOptions())); TF_ASSERT_OK(transformed_session->Create(*transformed_graph_def)); std::vector<Tensor> transformed_outputs; TF_ASSERT_OK(transformed_session->Run(transformed_inputs, output_names, {}, &transformed_outputs)); const int output_count = output_names.size(); EXPECT_EQ(output_count, float_outputs.size()); EXPECT_EQ(output_count, transformed_outputs.size()); for (int i = 0; i < output_count; ++i) { test::ExpectTensorNear<float>(float_outputs[i], transformed_outputs[i], threshold); } } void TestQuantizedVersusFloatGraph( const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& inputs, const std::vector<string>& output_names) { GraphDef quantized_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, inputs, inputs, output_names, {}, 1.0, &quantized_graph_def); const std::set<string> quantizable_ops = { "Add", "BiasAdd", "Concat", "Conv2D", "MatMul", "Relu", "Relu6", "ResizeBilinear", "AvgPool", "MaxPool", "Mul"}; for (const NodeDef& node : quantized_graph_def.node()) { EXPECT_EQ(0, quantizable_ops.count(node.op())) << "Found quantizable node " << node.op() << " for node named " << node.name(); } } void TestGraphWithInputRange( const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& float_inputs, const std::vector<string>& output_names, float range_min, float range_max) { 
    TransformFuncContext context;
    // Pass the known input range to the transform as parameters.
    context.params["input_min"] = {strings::StrCat(range_min)};
    context.params["input_max"] = {strings::StrCat(range_max)};
    // Quantize the float inputs up front so the transformed graph can be fed
    // eight-bit tensors directly.
    std::vector<std::pair<string, Tensor>> quantized_inputs;
    for (const std::pair<string, Tensor>& float_input : float_inputs) {
      const Tensor& float_tensor = float_input.second;
      Tensor quantized_tensor(DT_QUINT8, float_tensor.shape());
      FloatTensorToQuantizedInPlace<quint8>(float_tensor, range_min, range_max,
                                            &quantized_tensor);
      quantized_inputs.push_back({float_input.first, quantized_tensor});
    }
    GraphDef quantized_graph_def;
    TestTransformedVersusFloatGraph(
        QuantizeNodes, float_graph_def, float_inputs, quantized_inputs,
        output_names, context, 1.0, &quantized_graph_def);
  }

  // Quantizes with an explicit fallback output range instead of inserting
  // runtime RequantizationRange ops; uses a looser 2.0 threshold.
  void TestGraphWithFallbackRange(
      const GraphDef& float_graph_def,
      const std::vector<std::pair<string, Tensor>>& float_inputs,
      const std::vector<string>& output_names, float range_min,
      float range_max, GraphDef* quantized_graph_def) {
    TransformFuncContext context;
    context.params["fallback_min"] = {strings::StrCat(range_min)};
    context.params["fallback_max"] = {strings::StrCat(range_max)};
    TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def,
                                    float_inputs, float_inputs, output_names,
                                    context, 2.0, quantized_graph_def);
  }

  // Verifies that ops named in the "ignore_op" parameter keep their float
  // implementation after the transform.
  void TestIgnoreOps(std::initializer_list<string> ops_to_ignore) {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    // Small helper that adds a float Const node to the test graph.
    auto const_op = [&](const string& name, const TensorShape& shape,
                        std::initializer_list<float> values) {
      Tensor tensor(DT_FLOAT, shape);
      test::FillValues<float>(&tensor, values);
      return Const(root.WithOpName(name), Input::Initializer(tensor));
    };
    int m = 1;
    int n = 1;
    int k = 1;
    Output a_op = const_op("a_op", {m, k}, {2});
    Output b_op = const_op("b_op", {k, n}, {3});
    Output c_op = const_op("c_op", {m, k}, {1});
    Output d_op = const_op("d_op", {k, n}, {4});
    // One quantizable op of each kind we may ask to be ignored.
    Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op);
    Output mul_op = Mul(root.WithOpName("mul"), c_op, d_op);
GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TransformFuncContext context; if (ops_to_ignore.size() > 0) { context.params["ignore_op"] = ops_to_ignore; } GraphDef quantized_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {}, {"mat_mul_op", "mul"}, context, 1.0, &quantized_graph_def); for (const string& op_name : ops_to_ignore) { bool exists_in_quantized_graph = false; for (const NodeDef& node : quantized_graph_def.node()) { if (node.op() == op_name) { exists_in_quantized_graph = true; break; } } EXPECT_TRUE(exists_in_quantized_graph) << "Op " << op_name << " should not have been replace by a quantized version"; } } void TestQuantizeMatMul(int m, int n, int k, const std::vector<float>& a_values, const std::vector<float>& b_values) { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor a_tensor(DT_FLOAT, TensorShape({m, k})); test::FillValues<float>(&a_tensor, a_values); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Tensor b_tensor(DT_FLOAT, TensorShape({k, n})); test::FillValues<float>(&b_tensor, b_values); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mat_mul_op"}); } void TestQuantizeMatMulTiny() { TestQuantizeMatMul(1, 1, 1, {2}, {3}); TestQuantizeMatMul(1, 2, 1, {1}, {2, 3}); TestQuantizeMatMul(1, 1, 2, {1, 1}, {1, 1}); TestQuantizeMatMul(1, 1, 2, {0, 0}, {1, 1}); TestQuantizeMatMul(1, 1, 2, {1, 2}, {1, 2}); } void TestQuantizeMatMulSmall() { TestQuantizeMatMul(2, 4, 3, {1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); } void TestQuantizeMul() { using namespace ::tensorflow::ops; std::vector<int64_t> x_shape({10, 100}); const size_t x_num_elements = TensorShape(x_shape).num_elements(); 
    std::vector<float> x_values(x_num_elements);
    // Values in [0, 1) so the quantized range is well behaved.
    for (int i = 0; i < x_num_elements; ++i) {
      x_values[i] = (i % 256) / 256.0f;
    }
    std::vector<int64_t> y_shape({100});
    const size_t y_num_elements = TensorShape(y_shape).num_elements();
    std::vector<float> y_values(y_num_elements);
    // Mix of positive and negative values for the broadcast operand.
    for (int i = 0; i < y_num_elements; ++i) {
      y_values[i] = ((i + 23) % 123) - 50;
    }
    Scope root = Scope::NewRootScope();
    Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
    test::FillValues<float>(&x_float_tensor, x_values);
    Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
    Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
    test::FillValues<float>(&y_float_tensor, y_values);
    Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
    Mul mul = Mul(root.WithOpName("mul"), x, y);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mul"});
  }

  // Elementwise Add with a broadcast second operand; mirrors TestQuantizeMul.
  void TestQuantizeAdd() {
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    std::vector<int64_t> x_shape({10, 100});
    const size_t x_num_elements = TensorShape(x_shape).num_elements();
    std::vector<float> x_values(x_num_elements);
    for (int i = 0; i < x_num_elements; ++i) {
      x_values[i] = (i % 256) / 256.0f;
    }
    std::vector<int64_t> y_shape({100});
    const size_t y_num_elements = TensorShape(y_shape).num_elements();
    std::vector<float> y_values(y_num_elements);
    for (int i = 0; i < y_num_elements; ++i) {
      y_values[i] = ((i + 23) % 123) - 50;
    }
    Scope root = Scope::NewRootScope();
    Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
    test::FillValues<float>(&x_float_tensor, x_values);
    Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
    Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
    test::FillValues<float>(&y_float_tensor, y_values);
    Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
    Add add = Add(root.WithOpName("add"), x, y);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"add"});
  }

  // Builds a single Conv2D with the given geometry and verifies the
  // quantized rewrite matches the float result.
  void TestQuantizeConv2D(int depth, int input_width, int input_height,
                          int input_batch_count, int filter_size,
                          int filter_count, int stride, const string& padding,
                          const std::vector<float>& input_values,
                          const std::vector<float>& filter_values) {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor input_tensor(DT_FLOAT,
                        TensorShape({input_batch_count, input_height,
                                     input_width, depth}));
    test::FillValues<float>(&input_tensor, input_values);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor filter_tensor(
        DT_FLOAT, TensorShape({filter_size, filter_size, depth, filter_count}));
    test::FillValues<float>(&filter_tensor, filter_values);
    Output filter_op =
        Const(root.WithOpName("filter_op"), Input::Initializer(filter_tensor));
    Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, filter_op,
                            {1, stride, stride, 1}, padding);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"conv_op"});
  }

  // BiasAdd over small iota-filled tensors.
  void TestQuantizeBiasAdd() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
    test::FillIota<float>(&input_tensor, 1);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
    test::FillIota<float>(&offset_tensor, 1);
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Output bias_add_op =
        BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"bias_add_op"});
  }

  // Concat of two constant tensors along axis 0.
  void TestQuantizeConcat() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor shape_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&shape_tensor, {0});
    Output shape_op =
        Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor));
    Tensor a_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
    test::FillValues<float>(&a_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
    Tensor b_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
    test::FillValues<float>(&b_tensor,
                            {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
    Output concat_op =
        Concat(root.WithOpName("concat_op"), {a_op, b_op}, shape_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"concat_op"});
  }

  // Relu over a small constant tensor.
  void TestQuantizeRelu() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output relu_op = Relu(root.WithOpName("relu_op"), constant_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu_op"});
  }

  // Relu6 over a small constant tensor.
  void TestQuantizeRelu6() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output relu6_op = Relu6(root.WithOpName("relu6_op"), constant_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu6_op"});
  }

  // MaxPool over a small constant tensor.
  void TestQuantizeMaxPool() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace
        ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), constant_op,
                                 {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"max_pool_op"});
  }

  // AvgPool over a small constant tensor.
  void TestQuantizeAvgPool() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output avg_pool_op = AvgPool(root.WithOpName("avg_pool_op"), constant_op,
                                 {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"avg_pool_op"});
  }

  // Reshape of a 4x5 constant to 10x2.
  void TestQuantizeReshape() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor constant_tensor(DT_FLOAT, TensorShape({4, 5}));
    test::FillValues<float>(&constant_tensor,
                            {1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output reshape_op =
        Reshape(root.WithOpName("reshape_op"), constant_op, {10, 2});
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"reshape_op"});
  }

  // Builds a Dequantize -> (Reshape/Min/Max range calculation) -> QuantizeV2
  // round-trip, which RemoveRedundantQuantizations should collapse so the
  // final Dequantize reads directly from the original quantized constant.
  void TestRemoveRedundantQuantization() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor quantized_tensor(DT_QUINT8, TensorShape({}));
    test::FillValues<quint8>(&quantized_tensor, {0});
    Output quantized_op =
Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), quantized_op, quantized_min_op, quantized_max_op); Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1}); Output dequantize_reshape_dims = Const(root.WithOpName("dequantize_reshape_dims"), Input::Initializer(dequantize_reshape_dims_tensor)); Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0}); Output dequantize_reduction_dims = Const(root.WithOpName("dequantize_reduction_dims"), Input::Initializer(dequantize_reduction_dims_tensor)); Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"), dequantize_op, dequantize_reshape_dims); Output dequantize_min = Min(root.WithOpName("dequantize_min"), dequantize_reshape, dequantize_reduction_dims, Min::Attrs().KeepDims(false)); Output dequantize_max = Max(root.WithOpName("dequantize_max"), dequantize_reshape, dequantize_reduction_dims, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op, dequantize_min, dequantize_max, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize = Dequantize(root.WithOpName("final_dequantize"), quantize_op.output, quantize_op.output_min, quantize_op.output_max); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef removed_graph_def; 
TestTransformedVersusFloatGraph( RemoveRedundantQuantizations, float_graph_def, {}, {}, {"final_dequantize"}, {}, 1.0, &removed_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(removed_graph_def, &node_map); EXPECT_EQ(1, node_map.count("final_dequantize")); EXPECT_EQ("quantized_op", node_map.at("final_dequantize")->input(0)); } void TestRemoveRedundantQuantizationWithBiasAdd() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6})); test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Tensor offset_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_min_tensor, {0.0f}); Output offset_min_op = Const(root.WithOpName("offset_min_op"), Input::Initializer(offset_min_tensor)); Tensor offset_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_max_tensor, {255.0f}); Output offset_max_op = Const(root.WithOpName("offset_max_op"), Input::Initializer(offset_max_tensor)); QuantizedBiasAdd quantized_bias_add_op( root.WithOpName("bias_add_op"), quantized_op, offset_op, quantized_min_op, quantized_max_op, offset_min_op, offset_max_op, DT_QINT32); RequantizationRange requantization_range_op( 
        root.WithOpName("requantization_range_op"),
        quantized_bias_add_op.output, quantized_bias_add_op.min_out,
        quantized_bias_add_op.max_out);
    Requantize requantize_op(
        root.WithOpName("requantize_op"), quantized_bias_add_op.output,
        quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
        requantization_range_op.output_min, requantization_range_op.output_max,
        DT_QUINT8);
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
                   requantize_op.output_min, requantize_op.output_max);
    // Recompute the float range via Reshape -> Min / Max, mimicking the
    // redundant pattern the transform should remove.
    Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
    Output dequantize_reshape_dims =
        Const(root.WithOpName("dequantize_reshape_dims"),
              Input::Initializer(dequantize_reshape_dims_tensor));
    Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
    Output dequantize_reduction_dims =
        Const(root.WithOpName("dequantize_reduction_dims"),
              Input::Initializer(dequantize_reduction_dims_tensor));
    Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"),
                                        dequantize_op, dequantize_reshape_dims);
    Output dequantize_min =
        Min(root.WithOpName("dequantize_min"), dequantize_reshape,
            dequantize_reduction_dims, Min::Attrs().KeepDims(false));
    Output dequantize_max =
        Max(root.WithOpName("dequantize_max"), dequantize_reshape,
            dequantize_reduction_dims, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           dequantize_min, dequantize_max, DT_QUINT8,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize =
        Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
                   quantize_op.output_min, quantize_op.output_max);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef removed_graph_def;
    TestTransformedVersusFloatGraph(
        RemoveRedundantQuantizations, float_graph_def, {}, {},
        {"final_dequantize"}, {}, 1.0, &removed_graph_def);
std::map<string, const NodeDef*> node_map; MapNamesToNodes(removed_graph_def, &node_map); EXPECT_EQ(1, node_map.count("final_dequantize")); EXPECT_EQ("requantize_op", node_map.at("final_dequantize")->input(0)); } void TestQuantizeResizeBilinear() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor size_tensor(DT_INT32, TensorShape({2})); test::FillValues<int32>(&size_tensor, {256, 256}); Output constant_op = Const(root.WithOpName("size_tensor_op"), Input::Initializer(size_tensor)); Output placeholder_op = Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT); Output resize_bilinear_op = ResizeBilinear( root.WithOpName("resize_bilinear_op"), placeholder_op, constant_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); Tensor input_tensor(DT_FLOAT, {1, 128, 128, 3}); test::FillFn<float>(&input_tensor, [](int) { return 100.0f; }); TestQuantizedVersusFloatGraph(float_graph_def, {{"placeholder_op", input_tensor}}, {"resize_bilinear_op"}); } void TestRemoveRedundantQuantizationWithMultipleOutputs() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6})); test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), 
                             Input::Initializer(offset_tensor));
    Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_min_tensor, {0.0f});
    Output offset_min_op = Const(root.WithOpName("offset_min_op"),
                                 Input::Initializer(offset_min_tensor));
    Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_max_tensor, {255.0f});
    Output offset_max_op = Const(root.WithOpName("offset_max_op"),
                                 Input::Initializer(offset_max_tensor));
    QuantizedBiasAdd quantized_bias_add_op(
        root.WithOpName("bias_add_op"), quantized_op, offset_op,
        quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
        DT_QINT32);
    RequantizationRange requantization_range_op(
        root.WithOpName("requantization_range_op"),
        quantized_bias_add_op.output, quantized_bias_add_op.min_out,
        quantized_bias_add_op.max_out);
    Requantize requantize_op(
        root.WithOpName("requantize_op"), quantized_bias_add_op.output,
        quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
        requantization_range_op.output_min, requantization_range_op.output_max,
        DT_QUINT8);
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
                   requantize_op.output_min, requantize_op.output_max);
    // Redundant float-range recalculation (Reshape -> Min / Max) that the
    // transform is expected to eliminate.
    Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
    Output dequantize_reshape_dims =
        Const(root.WithOpName("dequantize_reshape_dims"),
              Input::Initializer(dequantize_reshape_dims_tensor));
    Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
    Output dequantize_reduction_dims =
        Const(root.WithOpName("dequantize_reduction_dims"),
              Input::Initializer(dequantize_reduction_dims_tensor));
    Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"),
                                        dequantize_op, dequantize_reshape_dims);
    Output dequantize_min =
        Min(root.WithOpName("dequantize_min"), dequantize_reshape,
            dequantize_reduction_dims, Min::Attrs().KeepDims(false));
    Output
        dequantize_max =
            Max(root.WithOpName("dequantize_max"), dequantize_reshape,
                dequantize_reduction_dims, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           dequantize_min, dequantize_max, DT_QUINT8,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize =
        Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
                   quantize_op.output_min, quantize_op.output_max);
    // Second consumer of dequantize_op: forces one Dequantize to survive.
    Output relu_op = Relu(root.WithOpName("relu_op"), dequantize_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef removed_graph_def;
    TestTransformedVersusFloatGraph(
        RemoveRedundantQuantizations, float_graph_def, {}, {},
        {"final_dequantize", "relu_op"}, {}, 1.0, &removed_graph_def);
    // Exactly two Dequantize ops should remain: one feeding relu_op and the
    // final one.
    std::map<string, int> op_type_count;
    for (const NodeDef& node : removed_graph_def.node()) {
      ++op_type_count[node.op()];
    }
    EXPECT_EQ(2, op_type_count["Dequantize"]);
  }

  // Checks that QuantizePlaceholders rewrites a float Placeholder to a
  // quantized one (with min/max from the params) without changing results.
  void TestQuantizePlaceholders() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Output placeholder_op =
        Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT);
    Output relu_op = Relu(root.WithOpName("relu_op"), placeholder_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TransformFuncContext context;
    context.input_names = {"placeholder_op"};
    context.output_names = {"relu_op"};
    context.params = {{"input_min", {"-10.0"}}, {"input_max", {"10.0"}}};
    GraphDef quantized_graph_def;
    TF_ASSERT_OK(
        QuantizePlaceholders(float_graph_def, context, &quantized_graph_def));
    Tensor input_tensor(DT_FLOAT, {});
    input_tensor.flat<float>()(0) = 5.0f;
    TestQuantizedVersusFloatGraph(
        float_graph_def, {{"placeholder_op", input_tensor}}, {"relu_op"});
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(quantized_graph_def, &node_map);
    // relu_op must no longer consume the placeholder directly, and the
    // placeholder must now produce quantized values.
    EXPECT_NE("placeholder_op", node_map.at("relu_op")->input(0));
    EXPECT_EQ("Placeholder", node_map.at("placeholder_op")->op());
    EXPECT_EQ(DT_QUINT8,
              node_map.at("placeholder_op")->attr().at("dtype").type());
  }

  // End-to-end check of the input_min/input_max parameters.
  void TestInputRange() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    const int width = 100;
    Tensor a_data(DT_FLOAT, TensorShape({1, width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
    Output bias_add =
        BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&placeholder_tensor, 1.0f);
    TestGraphWithInputRange(graph_def, {{"placeholder", placeholder_tensor}},
                            {"bias_add"}, 0.0f, 100.0f);
  }

  // With a fallback range supplied, no RequantizationRange op should be
  // emitted.
  void TestFallbackRange() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    const int width = 100;
    Tensor a_data(DT_FLOAT, TensorShape({1, width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
    Output bias_add =
        BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&placeholder_tensor, 1.0f);
    GraphDef quantized_graph_def;
    TestGraphWithFallbackRange(graph_def, {{"placeholder", placeholder_tensor}},
                               {"bias_add"}, 0.0f, 200.0f,
                               &quantized_graph_def);
    for (const NodeDef& node : quantized_graph_def.node()) {
      EXPECT_NE("RequantizationRange", node.op());
    }
  }

  // FakeQuantWithMinMaxVars should be rewritten into a Requantize-based
  // pattern by ConvertFakeQuantsToRequantize.
  void TestConvertFakeQuantsToRequantize() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
    test::FillIota<float>(&input_tensor, 1);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor
        offset_tensor(DT_FLOAT, TensorShape({6}));
    test::FillIota<float>(&offset_tensor, 1);
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Output bias_add_op =
        BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
    // FakeQuant with a known [0, 18] range wrapping the BiasAdd output.
    Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
    Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"),
                                     Input::Initializer(fake_quant_min_tensor));
    Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
    Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"),
                                     Input::Initializer(fake_quant_max_tensor));
    Output fake_quant_op =
        FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op,
                                fake_quant_min_op, fake_quant_max_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef converted_graph_def;
    TestTransformedVersusFloatGraph(ConvertFakeQuantsToRequantize,
                                    float_graph_def, {}, {}, {"fake_quant_op"},
                                    {}, 1.0, &converted_graph_def);
    // No FakeQuant node may survive the conversion.
    for (const NodeDef& node : converted_graph_def.node()) {
      EXPECT_NE("FakeQuantWithMinMaxVars", node.op());
    }
  }

  // Two back-to-back Requantize stages should be merged into one by
  // MergeAdjacentRequantizes.
  void TestMergeAdjacentRequantizes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
    Tensor input_tensor(DT_QUINT8, TensorShape({1, 1, 2, 6}));
    test::FillValues<quint8>(&input_tensor,
                             {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor input_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&input_min_tensor, {0.0f});
    Output input_min_op = Const(root.WithOpName("input_min_op"),
                                Input::Initializer(input_min_tensor));
    Tensor input_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&input_max_tensor, {255.0f});
    Output input_max_op = Const(root.WithOpName("input_max_op"),
                                Input::Initializer(input_max_tensor));
    Tensor offset_tensor(DT_QUINT8,
                         TensorShape({6}));
    test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_min_tensor, {0.0f});
    Output offset_min_op = Const(root.WithOpName("offset_min_op"),
                                 Input::Initializer(offset_min_tensor));
    Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_max_tensor, {255.0f});
    Output offset_max_op = Const(root.WithOpName("offset_max_op"),
                                 Input::Initializer(offset_max_tensor));
    // First quantized stage: BiasAdd producing 32-bit output, requantized to
    // eight bits using a runtime range.
    QuantizedBiasAdd quantized_bias_add_op(
        root.WithOpName("quantized_bias_add_op"), input_op, offset_op,
        input_min_op, input_max_op, offset_min_op, offset_max_op, DT_QINT32);
    RequantizationRange requantization_range_op(
        root.WithOpName("requantization_range_op"),
        quantized_bias_add_op.output, quantized_bias_add_op.min_out,
        quantized_bias_add_op.max_out);
    Requantize requantize_op(
        root.WithOpName("requantize_op"), quantized_bias_add_op.output,
        quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
        requantization_range_op.output_min, requantization_range_op.output_max,
        DT_QUINT8);
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
                   requantize_op.output_min, requantize_op.output_max,
                   Dequantize::Attrs().Mode("MIN_FIRST"));
    // Re-quantize to 32 bits with a fixed [0, 255] range so a second
    // Requantize stage can be attached directly afterwards.
    Tensor quantize_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantize_min_tensor, {0.0f});
    Output quantize_min_op = Const(root.WithOpName("quantize_min_op"),
                                   Input::Initializer(quantize_min_tensor));
    Tensor quantize_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantize_max_tensor, {255.0f});
    Output quantize_max_op = Const(root.WithOpName("quantize_max_op"),
                                   Input::Initializer(quantize_max_tensor));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           quantize_min_op, quantize_max_op, DT_QINT32,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Tensor
fake_requantize_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_requantize_min_tensor, {0.0f}); Output fake_requantize_min_op = Const(root.WithOpName("fake_requantize_min_op"), Input::Initializer(fake_requantize_min_tensor)); Tensor fake_requantize_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_requantize_max_tensor, {255.0f}); Output fake_requantize_max_op = Const(root.WithOpName("fake_requantize_max_op"), Input::Initializer(fake_requantize_max_tensor)); Requantize fake_requantize_op( root.WithOpName("fake_requantize_op"), quantize_op.output, quantize_op.output_min, quantize_op.output_max, fake_requantize_min_op, fake_requantize_max_op, DT_QUINT8); Output fake_dequantize_op = Dequantize( root.WithOpName("fake_dequantize_op"), fake_requantize_op.output, fake_requantize_op.output_min, fake_requantize_op.output_max, Dequantize::Attrs().Mode("MIN_FIRST")); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(MergeAdjacentRequantizes, float_graph_def, {}, {}, {"fake_dequantize_op"}, {}, 1.0, &converted_graph_def); int requantize_count = 0; for (const NodeDef& node : converted_graph_def.node()) { if (node.op() == "Requantize") { ++requantize_count; } } EXPECT_EQ(1, requantize_count); } void TestConvertFakeQuantsEndToEnd() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({})); 
test::FillValues<float>(&fake_quant_min_tensor, {0.0f}); Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"), Input::Initializer(fake_quant_min_tensor)); Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_max_tensor, {18.0f}); Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"), Input::Initializer(fake_quant_max_tensor)); Output fake_quant_op = FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op, fake_quant_min_op, fake_quant_max_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {}, {"fake_quant_op"}, {}, 1.0, &converted_graph_def); int requantize_count = 0; for (const NodeDef& node : converted_graph_def.node()) { EXPECT_NE("FakeQuantWithMinMaxVars", node.op()); if (node.op() == "Requantize") { ++requantize_count; } } EXPECT_EQ(1, requantize_count); } void TestHoistFakeQuants() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); Output relu_op = Relu(root.WithOpName("relu_op"), bias_add_op); Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), relu_op, {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME"); Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_min_tensor, {0.0f}); Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"), Input::Initializer(fake_quant_min_tensor)); Tensor fake_quant_max_tensor(DT_FLOAT, 
TensorShape({})); test::FillValues<float>(&fake_quant_max_tensor, {18.0f}); Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"), Input::Initializer(fake_quant_max_tensor)); Output fake_quant_op = FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), max_pool_op, fake_quant_min_op, fake_quant_max_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(HoistFakeQuants, float_graph_def, {}, {}, {"fake_quant_op"}, {}, 1.0, &converted_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(converted_graph_def, &node_map); EXPECT_EQ("MaxPool", node_map.at("fake_quant_op")->op()); EXPECT_EQ("FakeQuantWithMinMaxVars", node_map.at(node_map.at("relu_op")->input(0))->op()); } void TestMergeDuplicateQuantizes() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({})); test::FillValues<quint8>(&quantized_tensor, {0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), quantized_op, quantized_min_op, quantized_max_op); Tensor quantize_reshape_dims1_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&quantize_reshape_dims1_tensor, {-1}); Output quantize_reshape_dims1 = Const(root.WithOpName("dequantize_reshape_dims1"), Input::Initializer(quantize_reshape_dims1_tensor)); Tensor quantize_reduction_dims1_tensor(DT_INT32, 
TensorShape({})); test::FillValues<int32>(&quantize_reduction_dims1_tensor, {0}); Output quantize_reduction_dims1 = Const(root.WithOpName("quantize_reduction_dims1"), Input::Initializer(quantize_reduction_dims1_tensor)); Output quantize_reshape1 = Reshape(root.WithOpName("quantize_reshape1"), dequantize_op, quantize_reshape_dims1); Output quantize_min1 = Min(root.WithOpName("quantize_min1"), quantize_reshape1, quantize_reduction_dims1, Min::Attrs().KeepDims(false)); Output quantize_max1 = Max(root.WithOpName("quantize_max1"), quantize_reshape1, quantize_reduction_dims1, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op1(root.WithOpName("quantize_op1"), dequantize_op, quantize_min1, quantize_max1, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Tensor quantize_reshape_dims2_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&quantize_reshape_dims2_tensor, {-1}); Output quantize_reshape_dims2 = Const(root.WithOpName("dequantize_reshape_dims2"), Input::Initializer(quantize_reshape_dims2_tensor)); Tensor quantize_reduction_dims2_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&quantize_reduction_dims2_tensor, {0}); Output quantize_reduction_dims2 = Const(root.WithOpName("quantize_reduction_dims2"), Input::Initializer(quantize_reduction_dims2_tensor)); Output quantize_reshape2 = Reshape(root.WithOpName("quantize_reshape2"), dequantize_op, quantize_reshape_dims2); Output quantize_min2 = Min(root.WithOpName("quantize_min2"), quantize_reshape2, quantize_reduction_dims2, Min::Attrs().KeepDims(false)); Output quantize_max2 = Max(root.WithOpName("quantize_max2"), quantize_reshape2, quantize_reduction_dims2, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op2(root.WithOpName("quantize_op2"), dequantize_op, quantize_min1, quantize_max1, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize1 = Dequantize(root.WithOpName("final_dequantize1"), quantize_op1.output, quantize_op1.output_min, quantize_op1.output_max); Output 
final_dequantize2 = Dequantize(root.WithOpName("final_dequantize2"), quantize_op2.output, quantize_op2.output_min, quantize_op2.output_max); Output add_op = Add(root.WithOpName("add_op"), final_dequantize1, final_dequantize2); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"add_op"}, {}, 1.0, &merged_graph_def); std::map<string, int> op_map; for (const NodeDef& node : merged_graph_def.node()) { ++op_map[node.op()]; } EXPECT_EQ(1, op_map["QuantizeV2"]); } void TestMergeDuplicateConsts() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor a_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output add_op = Add(root.WithOpName("add_op"), a_op, b_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op"))); string remaining_const; if (node_map.count("a_op")) { remaining_const = "a_op"; } else { remaining_const = "b_op"; } EXPECT_EQ(remaining_const, node_map["add_op"]->input(0)); EXPECT_EQ(remaining_const, node_map["add_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); 
EXPECT_EQ("add_op", node_map["mul_op"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op"]->input(1)); } void TestMergeDuplicatesNested() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor a_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op"))); EXPECT_EQ(1, (node_map.count("a_relu_op") + node_map.count("b_relu_op"))); string remaining_relu; if (node_map.count("a_relu_op")) { remaining_relu = "a_relu_op"; } else { remaining_relu = "b_relu_op"; } EXPECT_EQ(remaining_relu, node_map["add_op"]->input(0)); EXPECT_EQ(remaining_relu, node_map["add_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); EXPECT_EQ("add_op", node_map["mul_op"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op"]->input(1)); } void TestMergeDuplicatesInOut() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor a_tensor(DT_FLOAT, 
TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op1 = Mul(root.WithOpName("mul_op1"), add_op, c_op); Output mul_op2 = Mul(root.WithOpName("mul_op2"), add_op, c_op); Output mul_op3 = Mul(root.WithOpName("mul_op3"), add_op, c_op); Output final_mul_op = Mul(root.WithOpName("final_mul_op"), mul_op2, mul_op3); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {{"a_op", a_tensor}}, {{"a_op", a_tensor}}, {"mul_op1", "final_mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, node_map.count("a_op")); EXPECT_EQ(1, node_map.count("b_op")); EXPECT_EQ(1, node_map.count("a_relu_op")); EXPECT_EQ(1, node_map.count("b_relu_op")); EXPECT_EQ(1, node_map.count("mul_op1")); EXPECT_EQ(1, node_map.count("final_mul_op")); EXPECT_EQ(1, (node_map.count("mul_op2") + node_map.count("mul_op3"))); string remaining_mul; if (node_map.count("mul_op2")) { remaining_mul = "mul_op2"; } else { remaining_mul = "mul_op3"; } EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(0)); EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); EXPECT_EQ("add_op", node_map["mul_op1"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op1"]->input(1)); } 
void TestExcludeNonFloat() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor int_constant_tensor(DT_INT32, TensorShape({4, 5})); test::FillIota<int32>(&int_constant_tensor, 1); Output int_constant = Const(root.WithOpName("int_constant"), Input::Initializer(int_constant_tensor)); Tensor float_constant_tensor(DT_FLOAT, TensorShape({4, 5})); test::FillIota<float>(&float_constant_tensor, 2.0f); Output float_constant = Const(root.WithOpName("float_constant"), Input::Initializer(float_constant_tensor)); Output excluded_reshape_op = Reshape(root.WithOpName("excluded_reshape_op"), int_constant, {10, 2}); Output included_reshape_op = Reshape(root.WithOpName("included_reshape_op"), float_constant, {10, 2}); Output excluded_relu_op = Relu(root.WithOpName("excluded_relu_op"), excluded_reshape_op); Output excluded_float_caster = Cast( root.WithOpName("excluded_float_caster"), excluded_relu_op, DT_FLOAT); Output included_relu_op = Relu(root.WithOpName("included_relu_op"), included_reshape_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef quantized_graph_def; TestTransformedVersusFloatGraph( QuantizeNodes, float_graph_def, {}, {}, {"excluded_float_caster", "included_relu_op"}, {}, 1.0, &quantized_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(quantized_graph_def, &node_map); ASSERT_EQ(1, node_map.count("excluded_reshape_op")); EXPECT_EQ("Reshape", node_map.at("excluded_reshape_op")->op()); ASSERT_EQ(1, node_map.count("included_reshape_op")); EXPECT_EQ("Dequantize", node_map.at("included_reshape_op")->op()); } }; TEST_F(QuantizeNodesTest, TestIgnoreOps) { TestIgnoreOps({}); TestIgnoreOps({"MatMul"}); TestIgnoreOps({"MatMul", "Mul"}); } TEST_F(QuantizeNodesTest, TestQuantizeMatMulTiny) { TestQuantizeMatMulTiny(); } TEST_F(QuantizeNodesTest, TestQuantizeMatMulSmall) { TestQuantizeMatMulSmall(); } TEST_F(QuantizeNodesTest, TestQuantizeMul) { TestQuantizeMul(); } 
TEST_F(QuantizeNodesTest, TestQuantizeAdd) { TestQuantizeAdd(); } TEST_F(QuantizeNodesTest, TestOddPaddingProblem) { TestQuantizeConv2D(1, 4, 4, 1, 3, 1, 2, "SAME", {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); } TEST_F(QuantizeNodesTest, TestQuantizeConv2D) { TestQuantizeConv2D(1, 4, 3, 1, 3, 1, 1, "SAME", {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, {1, 4, 7, 2, 5, 8, 3, 6, 9}); } TEST_F(QuantizeNodesTest, TestQuantizeBiasAdd) { TestQuantizeBiasAdd(); } TEST_F(QuantizeNodesTest, TestQuantizeConcat) { TestQuantizeConcat(); } TEST_F(QuantizeNodesTest, TestQuantizeRelu) { TestQuantizeRelu(); } TEST_F(QuantizeNodesTest, TestQuantizeRelu6) { TestQuantizeRelu6(); } TEST_F(QuantizeNodesTest, TestQuantizeMaxPool) { TestQuantizeMaxPool(); } TEST_F(QuantizeNodesTest, TestQuantizeAvgPool) { TestQuantizeAvgPool(); } TEST_F(QuantizeNodesTest, TestQuantizeReshape) { TestQuantizeReshape(); } TEST_F(QuantizeNodesTest, TestQuantizeResizeBilinear) { TestQuantizeResizeBilinear(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantization) { TestRemoveRedundantQuantization(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithBiasAdd) { TestRemoveRedundantQuantizationWithBiasAdd(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithMultipleOutputs) { TestRemoveRedundantQuantizationWithMultipleOutputs(); } TEST_F(QuantizeNodesTest, TestQuantizePlaceholders) { TestQuantizePlaceholders(); } TEST_F(QuantizeNodesTest, TestInputRange) { TestInputRange(); } TEST_F(QuantizeNodesTest, TestFallbackRange) { TestFallbackRange(); } TEST_F(QuantizeNodesTest, TestConvertFakeQuantsToRequantize) { TestConvertFakeQuantsToRequantize(); } TEST_F(QuantizeNodesTest, TestMergeAdjacentRequantizes) { TestMergeAdjacentRequantizes(); } TEST_F(QuantizeNodesTest, TestConvertFakeQuantsEndToEnd) { TestConvertFakeQuantsEndToEnd(); } TEST_F(QuantizeNodesTest, TestHoistFakeQuants) { TestHoistFakeQuants(); } TEST_F(QuantizeNodesTest, 
TestMergeDuplicateQuantizes) { TestMergeDuplicateQuantizes(); } TEST_F(QuantizeNodesTest, TestMergeDuplicateConsts) { TestMergeDuplicateConsts(); } TEST_F(QuantizeNodesTest, TestMergeDuplicatesNested) { TestMergeDuplicatesNested(); } TEST_F(QuantizeNodesTest, TestMergeDuplicateInOut) { TestMergeDuplicatesInOut(); } TEST_F(QuantizeNodesTest, TestExcludeNonFloat) { TestExcludeNonFloat(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6a5f5f26-d97a-447b-bd03-5b3dc414fadb
cpp
tensorflow/tensorflow
insert_logging
tensorflow/tools/graph_transforms/insert_logging.cc
tensorflow/tools/graph_transforms/insert_logging_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status InsertLogging(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::unordered_set<string> ops; bool has_ops; if (context.params.count("op")) { has_ops = true; for (const string& op : context.params.at("op")) { ops.insert(op); } } else { has_ops = false; } std::unordered_set<string> prefixes; bool has_prefixes; if (context.params.count("prefix")) { has_prefixes = true; for (const string& prefix : context.params.at("prefix")) { prefixes.insert(prefix); } } else { has_prefixes = false; } string message; TF_RETURN_IF_ERROR(context.GetOneStringParameter("message", "", &message)); bool show_name; TF_RETURN_IF_ERROR( context.GetOneBoolParameter("show_name", false, &show_name)); bool show_op; TF_RETURN_IF_ERROR(context.GetOneBoolParameter("show_op", false, &show_op)); int32_t first_n; TF_RETURN_IF_ERROR(context.GetOneInt32Parameter("first_n", -1, &first_n)); int32_t summarize; TF_RETURN_IF_ERROR( context.GetOneInt32Parameter("summarize", 1024, &summarize)); std::unordered_map<string, std::set<int>> node_outputs; for (const NodeDef& node : input_graph_def.node()) { for (const string& input : node.input()) { const string canonical_input = CanonicalInputName(input); string prefix; string name; string suffix; NodeNamePartsFromInput(canonical_input, &prefix, &name, &suffix); const string output_index_string = suffix.substr(1, suffix.size() - 1); int32_t output_index; if 
(!strings::safe_strto32(output_index_string, &output_index)) { return errors::InvalidArgument("Couldn't understand output number in ", input); } node_outputs[name].insert(output_index); } } std::map<string, string> inputs_to_rename; std::unordered_set<string> ignore_when_renaming; GraphDef logged_graph_def; for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = logged_graph_def.mutable_node()->Add(); *new_node = node; if (node_outputs[node.name()].empty()) { continue; } const bool op_matches = (ops.count(node.op()) > 0); bool prefix_matches = false; for (const string& prefix : prefixes) { if (absl::StartsWith(node.name(), prefix)) { prefix_matches = true; } } if ((!has_ops || op_matches) && (!has_prefixes || prefix_matches)) { const string name_suffix = "__print__"; DataTypeVector input_types; DataTypeVector output_types; TF_RETURN_IF_ERROR(GetInOutTypes(node, &input_types, &output_types)); NodeDef* print_node = logged_graph_def.mutable_node()->Add(); print_node->set_op("Print"); print_node->set_name(strings::StrCat(node.name(), name_suffix)); string node_message; if (show_op) { node_message += ";" + node.op() + ";"; } if (show_name) { node_message += ";" + print_node->name() + ";"; } node_message += message; SetNodeAttr("message", node_message, print_node); SetNodeAttr("first_n", first_n, print_node); SetNodeAttr("summarize", summarize, print_node); print_node->add_input(node.name() + ":0"); SetNodeAttr("T", output_types[0], print_node); for (int output_index : node_outputs[node.name()]) { print_node->add_input(strings::StrCat(node.name(), ":", output_index)); } SetNodeAttr("U", output_types, print_node); ignore_when_renaming.insert(print_node->name()); inputs_to_rename[node.name() + ":0"] = strings::StrCat(node.name(), name_suffix, ":0"); } } output_graph_def->Clear(); return RenameNodeInputs(logged_graph_def, inputs_to_rename, ignore_when_renaming, output_graph_def); } REGISTER_GRAPH_TRANSFORM("insert_logging", InsertLogging); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status InsertLogging(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class InsertLoggingTest : public ::testing::Test { protected: void CheckGraphCanRun(const GraphDef& graph_def, const std::vector<string>& output_names) { std::unique_ptr<Session> session(NewSession(SessionOptions())); TF_ASSERT_OK(session->Create(graph_def)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, output_names, {}, &outputs)); } void TestInsertLogging() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor const_tensor(DT_FLOAT, TensorShape({10})); test::FillIota<float>(&const_tensor, 1.0f); Output const_node1 = Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor)); Output const_node2 = Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor)); Output const_node3 = Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor)); Output add_node2 = Add(root.WithOpName("add_node2"), const_node1, const_node2); Output add_node3 = Add(root.WithOpName("add_node3"), const_node1, const_node3); Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3); Output add_node4 = Add(root.WithOpName("add_node4"), mul_node1, const_node3); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); CheckGraphCanRun(graph_def, {"add_node4"}); GraphDef result; TransformFuncContext context; 
context.input_names = {}; context.output_names = {"add_node4"}; TF_ASSERT_OK(InsertLogging(graph_def, context, &result)); CheckGraphCanRun(result, {"add_node4"}); std::unordered_set<string> print_inputs; for (const NodeDef& node : result.node()) { if (node.op() == "Print") { print_inputs.insert(node.input(0)); } } EXPECT_EQ(6, print_inputs.size()); EXPECT_EQ(1, print_inputs.count("mul_node1:0")); EXPECT_EQ(1, print_inputs.count("add_node2:0")); EXPECT_EQ(1, print_inputs.count("add_node3:0")); EXPECT_EQ(0, print_inputs.count("add_node4:0")); EXPECT_EQ(1, print_inputs.count("const_node1:0")); EXPECT_EQ(1, print_inputs.count("const_node2:0")); EXPECT_EQ(1, print_inputs.count("const_node3:0")); } void TestInsertLoggingByOpType() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor const_tensor(DT_FLOAT, TensorShape({10})); test::FillIota<float>(&const_tensor, 1.0f); Output const_node1 = Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor)); Output const_node2 = Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor)); Output const_node3 = Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor)); Output add_node2 = Add(root.WithOpName("add_node2"), const_node1, const_node2); Output add_node3 = Add(root.WithOpName("add_node3"), const_node1, const_node3); Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3); Output add_node4 = Add(root.WithOpName("add_node4"), mul_node1, const_node3); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); CheckGraphCanRun(graph_def, {"add_node4"}); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"add_node4"}; context.params.insert( std::pair<string, std::vector<string>>({"op", {"Mul", "Add"}})); TF_ASSERT_OK(InsertLogging(graph_def, context, &result)); CheckGraphCanRun(result, {"add_node4"}); std::unordered_set<string> print_inputs; for (const NodeDef& node : result.node()) { 
if (node.op() == "Print") { print_inputs.insert(node.input(0)); } } EXPECT_EQ(3, print_inputs.size()); EXPECT_EQ(1, print_inputs.count("mul_node1:0")); EXPECT_EQ(1, print_inputs.count("add_node2:0")); EXPECT_EQ(1, print_inputs.count("add_node3:0")); EXPECT_EQ(0, print_inputs.count("add_node4:0")); EXPECT_EQ(0, print_inputs.count("const_node1:0")); EXPECT_EQ(0, print_inputs.count("const_node2:0")); EXPECT_EQ(0, print_inputs.count("const_node3:0")); } void TestInsertLoggingByPrefix() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor const_tensor(DT_FLOAT, TensorShape({10})); test::FillIota<float>(&const_tensor, 1.0f); Output const_node1 = Const(root.WithOpName("const_node1"), Input::Initializer(const_tensor)); Output const_node2 = Const(root.WithOpName("const_node2"), Input::Initializer(const_tensor)); Output const_node3 = Const(root.WithOpName("const_node3"), Input::Initializer(const_tensor)); Output add_node2 = Add(root.WithOpName("add_node2"), const_node1, const_node2); Output add_node3 = Add(root.WithOpName("add_node3"), const_node1, const_node3); Output mul_node1 = Mul(root.WithOpName("mul_node1"), add_node2, add_node3); Output add_node4 = Add(root.WithOpName("add_node4"), mul_node1, const_node3); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); CheckGraphCanRun(graph_def, {"add_node4"}); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"add_node4"}; context.params.insert( std::pair<string, std::vector<string>>({"prefix", {"add_node"}})); TF_ASSERT_OK(InsertLogging(graph_def, context, &result)); CheckGraphCanRun(result, {"add_node4"}); std::unordered_set<string> print_inputs; for (const NodeDef& node : result.node()) { if (node.op() == "Print") { print_inputs.insert(node.input(0)); } } EXPECT_EQ(2, print_inputs.size()); EXPECT_EQ(0, print_inputs.count("mul_node1:0")); EXPECT_EQ(1, print_inputs.count("add_node2:0")); EXPECT_EQ(1, 
print_inputs.count("add_node3:0")); EXPECT_EQ(0, print_inputs.count("add_node4:0")); EXPECT_EQ(0, print_inputs.count("const_node1:0")); EXPECT_EQ(0, print_inputs.count("const_node2:0")); EXPECT_EQ(0, print_inputs.count("const_node3:0")); } }; TEST_F(InsertLoggingTest, TestInsertLogging) { TestInsertLogging(); } TEST_F(InsertLoggingTest, TestInsertLoggingByOpType) { TestInsertLoggingByOpType(); } TEST_F(InsertLoggingTest, TestInsertLoggingByPrefix) { TestInsertLoggingByPrefix(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/insert_logging.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/insert_logging_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ea00a1a5-d830-4939-b4a6-4a3529b5083a
cpp
tensorflow/tensorflow
freeze_requantization_ranges
tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
tensorflow/tools/graph_transforms/freeze_requantization_ranges_test.cc
#include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { struct MinMaxRecord { string name; float min; float max; }; Status ExtractMinMaxRecords(const string& log_file_name, std::vector<MinMaxRecord>* records) { string file_data; TF_RETURN_IF_ERROR( ReadFileToString(Env::Default(), log_file_name, &file_data)); const string print_suffix("__print__"); const string requant_prefix("__requant_min_max:"); std::vector<string> file_lines = str_util::Split(file_data, '\n'); for (const string& file_line : file_lines) { if (!absl::StrContains(file_line, print_suffix + ";" + requant_prefix)) { continue; } std::vector<string> line_parts = str_util::Split(file_line, ';'); if (line_parts.size() < 2) { continue; } bool min_max_found = false; int min_max_index; for (int i = 1; i < line_parts.size(); ++i) { if (absl::StartsWith(line_parts[i], requant_prefix)) { min_max_found = true; min_max_index = i; } } if (!min_max_found) { continue; } string min_max_string = line_parts[min_max_index]; std::vector<string> min_max_parts = str_util::Split(min_max_string, '['); if ((min_max_parts.size() != 3) || (min_max_parts[0] != requant_prefix)) { continue; } string min_string = min_max_parts[1]; std::vector<string> min_string_parts = str_util::Split(min_string, ']'); if (min_string_parts.size() != 2) { continue; } string min_number_string = min_string_parts[0]; float min; if (!strings::safe_strtof(min_number_string.c_str(), &min)) { continue; } string max_string = min_max_parts[2]; std::vector<string> max_string_parts = str_util::Split(max_string, ']'); if (max_string_parts.size() != 2) { continue; } string max_number_string = max_string_parts[0]; float max; if (!strings::safe_strtof(max_number_string.c_str(), &max)) { continue; } StringPiece name_string = line_parts[min_max_index - 1]; if 
(!str_util::EndsWith(name_string, print_suffix)) { continue; } string name( name_string.substr(0, name_string.size() - print_suffix.size())); records->push_back({name, min, max}); } return OkStatus(); } Status FreezeRequantizationRanges(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { string min_max_log_file; TF_RETURN_IF_ERROR( context.GetOneStringParameter("min_max_log_file", "", &min_max_log_file)); if (min_max_log_file.empty()) { return errors::InvalidArgument( "You must pass a file name to min_max_log_file"); } float min_percentile; TF_RETURN_IF_ERROR( context.GetOneFloatParameter("min_percentile", 5.0f, &min_percentile)); float max_percentile; TF_RETURN_IF_ERROR( context.GetOneFloatParameter("max_percentile", 5.0f, &max_percentile)); std::vector<MinMaxRecord> records; TF_RETURN_IF_ERROR(ExtractMinMaxRecords(min_max_log_file, &records)); if (records.empty()) { return errors::InvalidArgument( "No min/max range logs were found in the log file"); } std::map<string, const NodeDef*> node_map; MapNamesToNodes(input_graph_def, &node_map); bool any_missing_nodes = false; std::map<string, std::vector<MinMaxRecord>> records_by_node; for (const MinMaxRecord& record : records) { records_by_node[record.name].push_back(record); if (!node_map.count(record.name)) { any_missing_nodes = true; LOG(WARNING) << "Node from log not found in graph: " << record.name; } } if (any_missing_nodes) { return errors::InvalidArgument( "Nodes were found in the log file that aren't present in the graph"); } std::map<string, std::pair<float, float>> range_for_nodes; for (const auto& record_info : records_by_node) { const string& name = record_info.first; const std::vector<MinMaxRecord> records = record_info.second; std::vector<float> mins; std::vector<float> maxs; for (const MinMaxRecord& record : records) { mins.push_back(record.min); maxs.push_back(record.max); } std::sort(mins.begin(), mins.end()); std::sort(maxs.begin(), maxs.end()); int 
min_index = std::round(mins.size() * (min_percentile / 100.0f)); if (min_index < 0) { min_index = 0; } int max_index = std::round(maxs.size() * (1.0f - (max_percentile / 100.0f))); if (max_index > (maxs.size() - 1)) { max_index = maxs.size() - 1; } const float min = mins[min_index]; const float max = maxs[max_index]; range_for_nodes[name] = {min, max}; } std::map<string, string> inputs_to_rename; GraphDef frozen_graph_def; for (const NodeDef& node : input_graph_def.node()) { if (range_for_nodes.count(node.name())) { if (node.op() != "RequantizationRange") { return errors::InvalidArgument( "Node is expected to be a RequantizationRange op: ", node.name(), ", but is: ", node.op()); } const float min_value = range_for_nodes.at(node.name()).first; NodeDef* min_node = frozen_graph_def.mutable_node()->Add(); min_node->set_op("Const"); min_node->set_name(node.name() + "/frozen_min"); SetNodeAttr("dtype", DT_FLOAT, min_node); Tensor min_tensor(DT_FLOAT, {}); min_tensor.flat<float>()(0) = min_value; SetNodeTensorAttr<float>("value", min_tensor, min_node); inputs_to_rename[node.name() + ":0"] = min_node->name() + ":0"; const float max_value = range_for_nodes.at(node.name()).second; NodeDef* max_node = frozen_graph_def.mutable_node()->Add(); max_node->set_op("Const"); max_node->set_name(node.name() + "/frozen_max"); SetNodeAttr("dtype", DT_FLOAT, max_node); Tensor max_tensor(DT_FLOAT, {}); max_tensor.flat<float>()(0) = max_value; SetNodeTensorAttr<float>("value", max_tensor, max_node); inputs_to_rename[node.name() + ":1"] = max_node->name() + ":0"; } else { NodeDef* new_node = frozen_graph_def.mutable_node()->Add(); *new_node = node; } } return RenameNodeInputs(frozen_graph_def, inputs_to_rename, std::unordered_set<string>(), output_graph_def); } REGISTER_GRAPH_TRANSFORM("freeze_requantization_ranges", FreezeRequantizationRanges); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FreezeRequantizationRanges(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); struct MinMaxRecord { string name; float min; float max; }; Status ExtractMinMaxRecords(const string& log_file_name, std::vector<MinMaxRecord>* records); class FreezeRequantizationRangesTest : public ::testing::Test { protected: void TestFreezeRequantizationRanges() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6})); test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Tensor 
offset_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_min_tensor, {0.0f}); Output offset_min_op = Const(root.WithOpName("offset_min_op"), Input::Initializer(offset_min_tensor)); Tensor offset_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_max_tensor, {255.0f}); Output offset_max_op = Const(root.WithOpName("offset_max_op"), Input::Initializer(offset_max_tensor)); QuantizedBiasAdd quantized_bias_add_op( root.WithOpName("bias_add_op"), quantized_op, offset_op, quantized_min_op, quantized_max_op, offset_min_op, offset_max_op, DT_QINT32); RequantizationRange requantization_range_op( root.WithOpName("requantization_range_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out); Requantize requantize_op( root.WithOpName("requantize_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out, requantization_range_op.output_min, requantization_range_op.output_max, DT_QUINT8); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), requantize_op.output, requantize_op.output_min, requantize_op.output_max); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); const string min_max_log_file_name = io::JoinPath(testing::TmpDir(), "min_max_log_file.txt"); { std::unique_ptr<WritableFile> file; TF_ASSERT_OK( Env::Default()->NewWritableFile(min_max_log_file_name, &file)); TF_ASSERT_OK(file->Append("Something irrelevant\n")); TF_ASSERT_OK( file->Append("[SomePrefix] " ";requantization_range_op__print__;__requant_min_max:" "[-2.4313571][10.584145]\n")); TF_ASSERT_OK(file->Append("Something else irrelevant\n")); } TransformFuncContext context; context.input_names = {}; context.output_names = {"dequantize_op"}; context.params = {{"min_max_log_file", {min_max_log_file_name}}}; GraphDef frozen_graph_def; TF_EXPECT_OK( FreezeRequantizationRanges(graph_def, context, &frozen_graph_def)); std::map<string, const NodeDef*> node_map; 
MapNamesToNodes(frozen_graph_def, &node_map); EXPECT_EQ(0, node_map.count("requantization_range_op")); EXPECT_EQ(1, node_map.count("requantize_op")); const string& min_input = NodeNameFromInput(node_map.at("requantize_op")->input(3)); ASSERT_EQ(1, node_map.count(min_input)); EXPECT_EQ("Const", node_map.at(min_input)->op()); const string& max_input = NodeNameFromInput(node_map.at("requantize_op")->input(4)); ASSERT_EQ(1, node_map.count(max_input)); EXPECT_EQ("Const", node_map.at(max_input)->op()); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK( original_session->Run({}, {"dequantize_op"}, {}, &original_outputs)); std::unique_ptr<Session> frozen_session(NewSession(SessionOptions())); TF_ASSERT_OK(frozen_session->Create(frozen_graph_def)); std::vector<Tensor> frozen_outputs; TF_ASSERT_OK( frozen_session->Run({}, {"dequantize_op"}, {}, &frozen_outputs)); ASSERT_EQ(original_outputs.size(), frozen_outputs.size()); ASSERT_EQ(1, frozen_outputs.size()); test::ExpectTensorNear<float>(original_outputs[0], frozen_outputs[0], 0.5); } void TestExtractMinMaxRecords() { const string min_max_log_file_name = io::JoinPath(testing::TmpDir(), "min_max_log_file2.txt"); { std::unique_ptr<WritableFile> file; TF_ASSERT_OK( Env::Default()->NewWritableFile(min_max_log_file_name, &file)); TF_ASSERT_OK(file->Append("Something irrelevant\n")); TF_ASSERT_OK( file->Append("[SomePrefix] " ";requantization_range_op__print__;__requant_min_max:" "[-2.4313571][10.584145]\n")); TF_ASSERT_OK(file->Append("Something else irrelevant\n")); TF_ASSERT_OK(file->Append( "[SomeOtherPrefix] " ";other_requantization_range_op__print__;__requant_min_max:" "[-1.0][2.0]\n")); TF_ASSERT_OK(file->Append("Something else irrelevant\n")); TF_ASSERT_OK( file->Append("[SomePrefix] " ";requantization_range_op__print__;__requant_min_max:" "[-1.bad][2.0]\n")); } std::vector<MinMaxRecord> records; 
TF_ASSERT_OK(ExtractMinMaxRecords(min_max_log_file_name, &records)); ASSERT_EQ(2, records.size()); EXPECT_EQ("requantization_range_op", records[0].name); EXPECT_NEAR(-2.4313571f, records[0].min, 1e-5f); EXPECT_NEAR(10.584145f, records[0].max, 1e-5f); EXPECT_EQ("other_requantization_range_op", records[1].name); EXPECT_NEAR(-1.0f, records[1].min, 1e-5f); EXPECT_NEAR(2.0f, records[1].max, 1e-5f); } }; TEST_F(FreezeRequantizationRangesTest, TestFreezeRequantizationRanges) { TestFreezeRequantizationRanges(); } TEST_F(FreezeRequantizationRangesTest, TestExtractMinMaxRecords) { TestExtractMinMaxRecords(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/freeze_requantization_ranges_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4d5bf531-0d43-45fa-a7f4-cfa7237f717c
cpp
tensorflow/tensorflow
remove_nodes
tensorflow/tools/graph_transforms/remove_nodes.cc
tensorflow/tools/graph_transforms/remove_nodes_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { if (!context.params.count("op")) { return errors::InvalidArgument( "remove_nodes expects at least one 'op'" "argument, e.g. remove_nodes(op=Identity)"); } int32_t max_inputs; TF_RETURN_IF_ERROR( context.GetOneInt32Parameter("max_inputs", 1, &max_inputs)); std::set<string> required_nodes; for (const string& input : context.input_names) { required_nodes.insert(NodeNameFromInput(input)); } for (const string& output : context.output_names) { required_nodes.insert(NodeNameFromInput(output)); } std::vector<string> ops_to_remove = context.params.at("op"); GraphDef current_graph_def = input_graph_def; for (const string& op : ops_to_remove) { for (int num_inputs = 1; num_inputs <= max_inputs; ++num_inputs) { OpTypePattern pattern = {op}; pattern.inputs.resize(num_inputs); for (int i = 0; i < num_inputs; ++i) { pattern.inputs[i] = {"*"}; } bool any_nodes_removed; do { any_nodes_removed = false; std::map<string, string> inputs_to_rename; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, pattern, [&inputs_to_rename, &required_nodes, &any_nodes_removed]( const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& replace_node = match.node; if (required_nodes.count(replace_node.name())) { LOG(INFO) << "Skipping replacement for " << replace_node.name(); 
CopyOriginalMatch(match, new_nodes); return OkStatus(); } const NodeDef& input_node = match.inputs[0].node; string target_name = input_node.name(); for (const string& input : replace_node.input()) { if (!input.compare(0, target_name.size(), target_name)) { if (input.size() == target_name.size() || input[target_name.size()] == ':') { target_name = input; break; } } } inputs_to_rename[replace_node.name()] = target_name; inputs_to_rename["^" + replace_node.name()] = "^" + input_node.name(); new_nodes->push_back(input_node); any_nodes_removed = true; return OkStatus(); }, {true}, &replaced_graph_def)); TF_RETURN_IF_ERROR( RenameNodeInputs(replaced_graph_def, inputs_to_rename, std::unordered_set<string>(), &current_graph_def)); } while (any_nodes_removed); } } *output_graph_def = current_graph_def; return OkStatus(); } REGISTER_GRAPH_TRANSFORM("remove_nodes", RemoveNodes); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RemoveNodesTest : public ::testing::Test { protected: void TestRemoveNodes() { GraphDef graph_def; NodeDef* add_node1 = graph_def.add_node(); add_node1->set_name("add_node1"); add_node1->set_op("Add"); add_node1->add_input("add_node2"); add_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("identity_node1"); add_node2->add_input("identity_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("identity_node1"); add_node3->add_input("const_node3"); NodeDef* identity_node1 = graph_def.add_node(); identity_node1->set_name("identity_node1"); identity_node1->set_op("Identity"); identity_node1->add_input("const_node1"); NodeDef* identity_node2 = graph_def.add_node(); identity_node2->set_name("identity_node2"); identity_node2->set_op("Identity"); identity_node2->add_input("const_node2"); NodeDef* identity_node3 = graph_def.add_node(); identity_node3->set_name("identity_node3"); identity_node3->set_op("Identity"); identity_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); 
const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"add_node1"}; context.params.insert( std::pair<string, std::vector<string>>({"op", {string("Identity")}})); TF_ASSERT_OK(RemoveNodes(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node1")); EXPECT_EQ("add_node2", node_lookup.at("add_node1")->input(0)); EXPECT_EQ("add_node3", node_lookup.at("add_node1")->input(1)); EXPECT_EQ(1, node_lookup.count("add_node2")); EXPECT_EQ("const_node1", node_lookup.at("add_node2")->input(0)); EXPECT_EQ("const_node2", node_lookup.at("add_node2")->input(1)); EXPECT_EQ(1, node_lookup.count("add_node3")); EXPECT_EQ("const_node1", node_lookup.at("add_node3")->input(0)); EXPECT_EQ("const_node3", node_lookup.at("add_node3")->input(1)); EXPECT_EQ(1, node_lookup.count("add_node4")); EXPECT_EQ("add_node2", node_lookup.at("add_node4")->input(0)); EXPECT_EQ("add_node3", node_lookup.at("add_node4")->input(1)); EXPECT_EQ(0, node_lookup.count("identity_node1")); EXPECT_EQ(0, node_lookup.count("identity_node2")); EXPECT_EQ(0, node_lookup.count("identity_node3")); EXPECT_EQ(1, node_lookup.count("const_node1")); EXPECT_EQ("Const", node_lookup.at("const_node1")->op()); EXPECT_EQ(1, node_lookup.count("const_node2")); EXPECT_EQ("Const", node_lookup.at("const_node2")->op()); EXPECT_EQ(1, node_lookup.count("const_node3")); EXPECT_EQ("Const", node_lookup.at("const_node3")->op()); } void TestRemoveOutputNodes() { GraphDef graph_def; NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); 
const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("const_node1"); add_node->add_input("const_node2"); NodeDef* identity_node = graph_def.add_node(); identity_node->set_name("identity_node"); identity_node->set_op("Identity"); identity_node->add_input("add_node"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"identity_node"}; context.params.insert( std::pair<string, std::vector<string>>({"op", {string("Identity")}})); TF_ASSERT_OK(RemoveNodes(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0)); EXPECT_EQ("const_node2", node_lookup.at("add_node")->input(1)); EXPECT_EQ(1, node_lookup.count("identity_node")); EXPECT_EQ("add_node", node_lookup.at("identity_node")->input(0)); } void TestRemoveChainedNodes() { GraphDef graph_def; NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* identity_node1 = graph_def.add_node(); identity_node1->set_name("identity_node1"); identity_node1->set_op("Identity"); identity_node1->add_input("const_node1"); NodeDef* identity_node2 = graph_def.add_node(); identity_node2->set_name("identity_node2"); identity_node2->set_op("Identity"); identity_node2->add_input("identity_node1"); NodeDef* identity_node3 = graph_def.add_node(); identity_node3->set_name("identity_node3"); identity_node3->set_op("Identity"); identity_node3->add_input("identity_node2"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); 
add_node->set_op("Add"); add_node->add_input("identity_node3"); add_node->add_input("const_node2"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"identity_node"}; context.params.insert( std::pair<string, std::vector<string>>({"op", {string("Identity")}})); TF_ASSERT_OK(RemoveNodes(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0)); EXPECT_EQ("const_node2", node_lookup.at("add_node")->input(1)); EXPECT_EQ(0, node_lookup.count("identity_node1")); EXPECT_EQ(0, node_lookup.count("identity_node2")); EXPECT_EQ(0, node_lookup.count("identity_node3")); } void TestRemoveMultipleInputs() { GraphDef graph_def; NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* const_node4 = graph_def.add_node(); const_node4->set_name("const_node4"); const_node4->set_op("Const"); NodeDef* fake_quant_node = graph_def.add_node(); fake_quant_node->set_name("fake_quant_node"); fake_quant_node->set_op("FakeQuantWithMinMaxVars"); fake_quant_node->add_input("const_node1"); fake_quant_node->add_input("const_node2"); fake_quant_node->add_input("const_node3"); NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("fake_quant_node"); add_node->add_input("const_node4"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"add_node"}; context.params.insert(std::pair<string, std::vector<string>>( {"op", {string("FakeQuantWithMinMaxVars")}})); context.params.insert( std::pair<string, 
std::vector<string>>({"max_inputs", {string("3")}})); TF_ASSERT_OK(RemoveNodes(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); ASSERT_EQ(1, node_lookup.count("const_node1")); ASSERT_EQ(1, node_lookup.count("const_node4")); ASSERT_EQ(0, node_lookup.count("fake_quant_node")); ASSERT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0)); EXPECT_EQ("const_node4", node_lookup.at("add_node")->input(1)); } }; TEST_F(RemoveNodesTest, TestRemoveNodes) { TestRemoveNodes(); } TEST_F(RemoveNodesTest, TestRemoveOutputNodes) { TestRemoveOutputNodes(); } TEST_F(RemoveNodesTest, TestRemoveChainedNodes) { TestRemoveChainedNodes(); } TEST_F(RemoveNodesTest, TestRemoveMultipleInputs) { TestRemoveMultipleInputs(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_nodes.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_nodes_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8638c5c4-0199-4090-888d-0e6129dbd870
cpp
tensorflow/tensorflow
sort_by_execution_order
tensorflow/tools/graph_transforms/sort_by_execution_order.cc
tensorflow/tools/graph_transforms/sort_by_execution_order_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status SortByExecutionOrderWithUnusedContext( const GraphDef& input_graph_def, const TransformFuncContext& unused_context, GraphDef* output_graph_def) { return SortByExecutionOrder(input_graph_def, output_graph_def); } REGISTER_GRAPH_TRANSFORM("sort_by_execution_order", SortByExecutionOrderWithUnusedContext); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { class SortByExecutionOrderTest : public ::testing::Test { protected: void GetOrder(const GraphDef& graph_def, std::map<string, int>* order) { for (int i = 0; i < graph_def.node_size(); ++i) { const NodeDef& node = graph_def.node(i); (*order)[node.name()] = i; } } void TestSimpleAdd() { GraphDef graph_def; NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("a_node"); add_node->add_input("b_node"); NodeDef* b_node = graph_def.add_node(); b_node->set_name("b_node"); b_node->set_op("Const"); NodeDef* a_node = graph_def.add_node(); a_node->set_name("a_node"); a_node->set_op("Const"); GraphDef result; TF_ASSERT_OK(SortByExecutionOrder(graph_def, &result)); std::map<string, int> order; GetOrder(result, &order); EXPECT_EQ(2, order["add_node"]); EXPECT_GT(2, order["a_node"]); EXPECT_GT(2, order["b_node"]); } void TestSimpleLinear() { GraphDef graph_def; NodeDef* negative_node = graph_def.add_node(); negative_node->set_name("negative_node"); negative_node->set_op("Negative"); negative_node->add_input("sqrt_node"); NodeDef* relu_node = graph_def.add_node(); relu_node->set_name("relu_node"); relu_node->set_op("Relu"); relu_node->add_input("const_node"); NodeDef* sqrt_node = graph_def.add_node(); sqrt_node->set_name("sqrt_node"); sqrt_node->set_op("Sqrt"); sqrt_node->add_input("relu_node"); NodeDef* const_node = graph_def.add_node(); 
const_node->set_name("const_node"); const_node->set_op("Const"); GraphDef result; TF_ASSERT_OK(SortByExecutionOrder(graph_def, &result)); std::map<string, int> order; GetOrder(result, &order); EXPECT_EQ(3, order["negative_node"]); EXPECT_EQ(2, order["sqrt_node"]); EXPECT_EQ(1, order["relu_node"]); EXPECT_EQ(0, order["const_node"]); } void TestSimpleTree() { GraphDef graph_def; NodeDef* add_node1 = graph_def.add_node(); add_node1->set_name("add_node1"); add_node1->set_op("Add"); add_node1->add_input("add_node2"); add_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node3"); add_node3->add_input("const_node4"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* const_node4 = graph_def.add_node(); const_node4->set_name("const_node4"); const_node4->set_op("Const"); GraphDef result; TF_ASSERT_OK(SortByExecutionOrder(graph_def, &result)); std::map<string, int> order; GetOrder(result, &order); EXPECT_EQ(6, order["add_node1"]); EXPECT_GT(6, order["add_node2"]); EXPECT_GT(6, order["add_node3"]); EXPECT_GT(5, order["const_node1"]); EXPECT_GT(5, order["const_node2"]); EXPECT_GT(5, order["const_node3"]); EXPECT_GT(5, order["const_node4"]); } void TestCommonAncestor() { GraphDef graph_def; NodeDef* add_node1 = graph_def.add_node(); add_node1->set_name("add_node1"); add_node1->set_op("Add"); add_node1->add_input("add_node2"); add_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); 
add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); GraphDef result; TF_ASSERT_OK(SortByExecutionOrder(graph_def, &result)); std::map<string, int> order; GetOrder(result, &order); EXPECT_EQ(5, order["add_node1"]); EXPECT_GT(5, order["add_node2"]); EXPECT_GT(5, order["add_node3"]); EXPECT_GT(4, order["const_node2"]); EXPECT_GT(4, order["const_node3"]); EXPECT_GT(3, order["const_node1"]); } }; TEST_F(SortByExecutionOrderTest, TestSimpleAdd) { TestSimpleAdd(); } TEST_F(SortByExecutionOrderTest, TestSimpleLinear) { TestSimpleLinear(); } TEST_F(SortByExecutionOrderTest, TestSimpleTree) { TestSimpleTree(); } TEST_F(SortByExecutionOrderTest, TestCommonAncestor) { TestCommonAncestor(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sort_by_execution_order.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sort_by_execution_order_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
62bcdf42-4fcb-4005-9282-b66430d5d200
cpp
tensorflow/tensorflow
fold_old_batch_norms
tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { namespace { Status ErrorIfNotVector(const Tensor& input, const string& input_name, int expected_width) { if ((input.shape().dims() != 1) || (input.shape().dim_size(0) != expected_width)) { return errors::InvalidArgument( input_name, " input to batch norm has bad shape: ", input.shape().DebugString()); } return OkStatus(); } Status GetScaleAndOffsetValues(const NodeMatch& match, std::vector<float>* scale_values, std::vector<float>* offset_values) { const NodeDef& batch_norm_node = match.node; CHECK(batch_norm_node.op() == "BatchNormWithGlobalNormalization" || batch_norm_node.op() == "FusedBatchNorm"); const bool is_fused = batch_norm_node.op() == "FusedBatchNorm"; const int mean_idx = is_fused ? 3 : 1; const int var_idx = is_fused ? 4 : 2; const int beta_idx = is_fused ? 2 : 3; const int gamma_idx = is_fused ? 1 : 4; const string epsilon_attr = is_fused ? 
"epsilon" : "variance_epsilon"; const bool scale_after_normalization = is_fused || batch_norm_node.attr().at("scale_after_normalization").b(); const NodeDef& mean_node = match.inputs[mean_idx].node; CHECK_EQ("Const", mean_node.op()); const NodeDef& variance_node = match.inputs[var_idx].node; CHECK_EQ("Const", variance_node.op()); const NodeDef& beta_node = match.inputs[beta_idx].node; CHECK_EQ("Const", beta_node.op()); const NodeDef& gamma_node = match.inputs[gamma_idx].node; CHECK_EQ("Const", gamma_node.op()); Tensor mean = GetNodeTensorAttr(mean_node, "value"); Tensor variance = GetNodeTensorAttr(variance_node, "value"); Tensor beta = GetNodeTensorAttr(beta_node, "value"); Tensor gamma = GetNodeTensorAttr(gamma_node, "value"); const float variance_epsilon = batch_norm_node.attr().at(epsilon_attr).f(); const int64_t num_cols = mean.shape().dim_size(0); TF_RETURN_IF_ERROR(ErrorIfNotVector(variance, "Variance", num_cols)); TF_RETURN_IF_ERROR(ErrorIfNotVector(beta, "Beta", num_cols)); TF_RETURN_IF_ERROR(ErrorIfNotVector(gamma, "gamma", num_cols)); scale_values->resize(num_cols); offset_values->resize(num_cols); if (scale_after_normalization) { for (int i = 0; i < num_cols; ++i) { (*scale_values)[i] = (1.0f / sqrtf(variance.flat<float>()(i) + variance_epsilon)) * gamma.flat<float>()(i); } } else { for (int i = 0; i < num_cols; ++i) { (*scale_values)[i] = (1.0f / sqrtf(variance.flat<float>()(i) + variance_epsilon)); } } for (int i = 0; i < num_cols; ++i) { (*offset_values)[i] = (-mean.flat<float>()(i) * (*scale_values)[i]) + beta.flat<float>()(i); } return OkStatus(); } Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values, const std::vector<float>& offset_values, const NodeMatch& conv_node_match, const string& conv_output_name, std::vector<NodeDef>* new_nodes) { const NodeDef& conv_node = conv_node_match.node; const NodeDef& input_node = conv_node_match.inputs[0].node; const NodeDef& weights_node = conv_node_match.inputs[1].node; CHECK_EQ("Const", 
weights_node.op()); Tensor weights = GetNodeTensorAttr(weights_node, "value"); int64_t weights_cols; if (conv_node.op() == "Conv2D") { weights_cols = weights.shape().dim_size(3); } else if (conv_node.op() == "DepthwiseConv2dNative") { weights_cols = weights.shape().dim_size(2) * weights.shape().dim_size(3); } else { weights_cols = weights.shape().dim_size(1); } CHECK_EQ(weights_cols, scale_values.size()); auto weights_vector = weights.flat<float>(); Tensor scaled_weights(DT_FLOAT, weights.shape()); auto scaled_weights_vector = scaled_weights.flat<float>(); for (int64_t row = 0; row < weights_vector.dimension(0); ++row) { scaled_weights_vector(row) = weights_vector(row) * scale_values[row % weights_cols]; } Tensor bias_offset(DT_FLOAT, {weights_cols}); auto bias_offset_vector = bias_offset.flat<float>(); for (int64_t col = 0; col < weights_cols; ++col) { bias_offset_vector(col) = offset_values[col]; } NodeDef scaled_weights_node; scaled_weights_node.set_op("Const"); scaled_weights_node.set_name(weights_node.name()); SetNodeAttr("dtype", DT_FLOAT, &scaled_weights_node); SetNodeTensorAttr<float>("value", scaled_weights, &scaled_weights_node); new_nodes->push_back(scaled_weights_node); new_nodes->push_back(input_node); new_nodes->push_back(conv_node); NodeDef bias_offset_node; bias_offset_node.set_op("Const"); bias_offset_node.set_name(conv_node.name() + "_bn_offset"); SetNodeAttr("dtype", DT_FLOAT, &bias_offset_node); SetNodeTensorAttr<float>("value", bias_offset, &bias_offset_node); new_nodes->push_back(bias_offset_node); NodeDef bias_add_node; bias_add_node.set_op("BiasAdd"); bias_add_node.set_name(conv_output_name); if (conv_node.attr().count("data_format")) { CopyNodeAttr(conv_node, "data_format", "data_format", &bias_add_node); } CopyNodeAttr(conv_node, "T", "T", &bias_add_node); AddNodeInput(conv_node.name(), &bias_add_node); AddNodeInput(bias_offset_node.name(), &bias_add_node); new_nodes->push_back(bias_add_node); return OkStatus(); } Status 
FuseBatchNormWithConv(const NodeMatch& match, std::vector<NodeDef>* new_nodes) { std::vector<float> scale_values; std::vector<float> offset_values; TF_RETURN_IF_ERROR( GetScaleAndOffsetValues(match, &scale_values, &offset_values)); const NodeDef& batch_norm_node = match.node; TF_RETURN_IF_ERROR( FuseScaleOffsetToConvWeights(scale_values, offset_values, match.inputs[0], batch_norm_node.name(), new_nodes)); return OkStatus(); } Status FuseBatchNormWithBatchToSpace(const NodeMatch& match, std::vector<NodeDef>* new_nodes) { std::vector<float> scale_values; std::vector<float> offset_values; TF_RETURN_IF_ERROR( GetScaleAndOffsetValues(match, &scale_values, &offset_values)); const NodeDef& batch_norm_node = match.node; const NodeMatch& batch_to_space_node_match = match.inputs[0]; const NodeMatch& conv_node_match = batch_to_space_node_match.inputs[0]; const NodeDef& batch_to_space_node = batch_to_space_node_match.node; const NodeDef& conv_node = conv_node_match.node; string biasadd_name = conv_node.name() + "/biasadd"; TF_RETURN_IF_ERROR(FuseScaleOffsetToConvWeights( scale_values, offset_values, conv_node_match, biasadd_name, new_nodes)); NodeDef new_batch_to_space_node = batch_to_space_node; new_batch_to_space_node.set_name(batch_norm_node.name()); new_batch_to_space_node.set_input(0, biasadd_name); new_nodes->push_back(batch_to_space_node_match.inputs[1].node); new_nodes->push_back(batch_to_space_node_match.inputs[2].node); new_nodes->push_back(new_batch_to_space_node); return OkStatus(); } Status FuseBatchNormWithConvConcat(const NodeMatch& match, std::vector<NodeDef>* new_nodes) { std::vector<float> scale_values; std::vector<float> offset_values; TF_RETURN_IF_ERROR( GetScaleAndOffsetValues(match, &scale_values, &offset_values)); const NodeDef& batch_norm_node = match.node; const NodeMatch& concat_node_match = match.inputs[0]; NodeDef concat_node = concat_node_match.node; CHECK_EQ("ConcatV2", concat_node.op()); NodeDef axis_node = concat_node_match.inputs[2].node; 
CHECK_EQ("Const", axis_node.op()); Tensor axis = GetNodeTensorAttr(axis_node, "value"); int32_t axis_scalar = (axis.scalar<int32>())(); std::vector<float> scale0(scale_values); std::vector<float> offset0(offset_values); std::vector<float> scale1(scale_values); std::vector<float> offset1(offset_values); if (axis_scalar == 3) { const NodeDef& weights0_node = concat_node_match.inputs[0].inputs[1].node; Tensor weights0 = GetNodeTensorAttr(weights0_node, "value"); const int64_t split_cols = weights0.shape().dim_size(3); scale0.erase(scale0.begin() + split_cols, scale0.end()); offset0.erase(offset0.begin() + split_cols, offset0.end()); scale1.erase(scale1.begin(), scale1.begin() + split_cols); offset1.erase(offset1.begin(), offset1.begin() + split_cols); } const string concat0_output_name = concat_node.name() + "_bn_in0"; TF_RETURN_IF_ERROR( FuseScaleOffsetToConvWeights(scale0, offset0, concat_node_match.inputs[0], concat0_output_name, new_nodes)); const string concat1_output_name = concat_node.name() + "_bn_in1"; TF_RETURN_IF_ERROR( FuseScaleOffsetToConvWeights(scale1, offset1, concat_node_match.inputs[1], concat1_output_name, new_nodes)); new_nodes->push_back(concat_node_match.inputs[2].node); concat_node.set_name(batch_norm_node.name()); concat_node.set_input(0, concat0_output_name); concat_node.set_input(1, concat1_output_name); new_nodes->push_back(concat_node); return OkStatus(); } } Status FoldOldBatchNorms(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef current_graph_def = input_graph_def; bool did_graph_change; do { did_graph_change = false; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, {"BatchNormWithGlobalNormalization|FusedBatchNorm", { {"Conv2D|DepthwiseConv2dNative", { {"*"}, {"Const"}, } }, {"Const"}, {"Const"}, {"Const"}, {"Const"}, } }, [&did_graph_change](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& 
output_nodes, std::vector<NodeDef>* new_nodes) { TF_RETURN_IF_ERROR(FuseBatchNormWithConv(match, new_nodes)); did_graph_change = true; return OkStatus(); }, {}, &replaced_graph_def)); current_graph_def = replaced_graph_def; } while (did_graph_change); do { did_graph_change = false; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, {"BatchNormWithGlobalNormalization|FusedBatchNorm", { {"BatchToSpaceND", { {"Conv2D|DepthwiseConv2dNative", { {"*"}, {"Const"}, } }, {"Const"}, {"Const"}, } }, {"Const"}, {"Const"}, {"Const"}, {"Const"}, } }, [&did_graph_change](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { TF_RETURN_IF_ERROR(FuseBatchNormWithBatchToSpace(match, new_nodes)); did_graph_change = true; return OkStatus(); }, {}, &replaced_graph_def)); current_graph_def = replaced_graph_def; } while (did_graph_change); do { did_graph_change = false; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, {"BatchNormWithGlobalNormalization|FusedBatchNorm", { {"ConcatV2|Concat", { {"Conv2D|DepthwiseConv2dNative", { {"*"}, {"Const"}, } }, {"Conv2D|DepthwiseConv2dNative", { {"*"}, {"Const"}, } }, {"Const"}, }, }, {"Const"}, {"Const"}, {"Const"}, {"Const"}, } }, [&did_graph_change](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { TF_RETURN_IF_ERROR(FuseBatchNormWithConvConcat(match, new_nodes)); did_graph_change = true; return OkStatus(); }, {}, &replaced_graph_def)); current_graph_def = replaced_graph_def; } while (did_graph_change); *output_graph_def = current_graph_def; return OkStatus(); } REGISTER_GRAPH_TRANSFORM("fold_old_batch_norms", FoldOldBatchNorms); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FoldOldBatchNorms(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class FoldOldBatchNormsTest : public ::testing::Test { protected: void TestFoldOldBatchNorms() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mean_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mean_data, {10.0f, 20.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&variance_data, {0.25f, 0.5f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); Tensor beta_data(DT_FLOAT, TensorShape({2})); 
test::FillValues<float>(&beta_data, {0.1f, 0.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("BatchNormWithGlobalNormalization"); batch_norm_node.set_name("output"); AddNodeInput("conv_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("variance_epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("scale_after_normalization", false, &batch_norm_node); *(original_graph_def.mutable_node()->Add()) = batch_norm_node; original_graph_def.mutable_versions()->set_producer(8); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("BatchNormWithGlobalNormalization", node.op()); } } void TestFoldOldBatchNormsAfterDepthwiseConv2dNative() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 
1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mean_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&variance_data, {0.25f, 0.5f, 0.75f, 1.0f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); Tensor beta_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&beta_data, {0.1f, 0.6f, 1.1f, 1.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f, 3.0f, 4.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("BatchNormWithGlobalNormalization"); batch_norm_node.set_name("output"); AddNodeInput("conv_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("variance_epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("scale_after_normalization", false, &batch_norm_node); 
*(original_graph_def.mutable_node()->Add()) = batch_norm_node; original_graph_def.mutable_versions()->set_producer(8); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("BatchNormWithGlobalNormalization", node.op()); } } void TestFoldFusedBatchNorms() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mean_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mean_data, {10.0f, 20.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&variance_data, {0.25f, 0.5f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); 
Tensor beta_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&beta_data, {0.1f, 0.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("FusedBatchNorm"); batch_norm_node.set_name("output"); AddNodeInput("conv_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("is_training", false, &batch_norm_node); *(original_graph_def.mutable_node()->Add()) = batch_norm_node; std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 2e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("FusedBatchNorm", node.op()); } } void TestFoldFusedBatchNormsAfterDepthwiseConv2dNative() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 
2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mean_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&variance_data, {0.25f, 0.5f, 0.75f, 1.0f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); Tensor beta_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&beta_data, {0.1f, 0.6f, 1.1f, 1.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f, 3.0f, 4.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("FusedBatchNorm"); batch_norm_node.set_name("output"); AddNodeInput("conv_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("is_training", false, &batch_norm_node); *(original_graph_def.mutable_node()->Add()) = batch_norm_node; std::unique_ptr<Session> original_session(NewSession(SessionOptions())); 
TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectClose(original_outputs[0], fused_outputs[0], 2e-5, 2e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("FusedBatchNorm", node.op()); } } void TestFoldFusedBatchNormsWithConcat(const bool split) { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; auto input_shape = split ? TensorShape({1, 1, 6, 2}) : TensorShape({1, 1, 12, 1}); Tensor input_data(DT_FLOAT, input_shape); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input0_op = Const(root.WithOpName("input_op0"), Input::Initializer(input_data)); auto weight_shape = split ? 
TensorShape({1, 2, 2, 1}) : TensorShape({1, 2, 1, 2}); Tensor weights0_data(DT_FLOAT, weight_shape); test::FillValues<float>(&weights0_data, {1.0f, 2.0f, 3.0f, 4.0f}); Output weights0_op = Const(root.WithOpName("weights1_op"), Input::Initializer(weights0_data)); Output conv0_op = Conv2D(root.WithOpName("conv1_op"), input0_op, weights0_op, {1, 1, 1, 1}, "VALID"); Output input1_op = Const(root.WithOpName("input1_op"), Input::Initializer(input_data)); Tensor weights1_data(DT_FLOAT, weight_shape); test::FillValues<float>(&weights1_data, {1.0f, 2.0f, 3.0f, 4.0f}); Output weights1_op = Const(root.WithOpName("weights1_op"), Input::Initializer(weights1_data)); Output conv1_op = Conv2D(root.WithOpName("conv1_op"), input1_op, weights1_op, {1, 1, 1, 1}, "VALID"); Tensor shape_tensor(DT_INT32, TensorShape({})); int32_t concat_axis = split ? 3 : 2; test::FillValues<int32>(&shape_tensor, {concat_axis}); Output shape_op = Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor)); Output concat_op = Concat(root.WithOpName("concat_op"), {conv0_op, conv1_op}, shape_op); Tensor mean_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mean_data, {10.0f, 20.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&variance_data, {0.25f, 0.5f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); Tensor beta_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&beta_data, {0.1f, 0.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("FusedBatchNorm"); 
batch_norm_node.set_name("output"); AddNodeInput("concat_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("is_training", false, &batch_norm_node); *(original_graph_def.mutable_node()->Add()) = batch_norm_node; std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectClose(original_outputs[0], fused_outputs[0]); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("FusedBatchNorm", node.op()); } } }; void TestFoldFusedBatchNormsWithBatchToSpace() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({2, 1, 3, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor block_shape_data(DT_INT32, 
TensorShape({2})); test::FillValues<int32>(&block_shape_data, {1, 2}); Output block_shape_op = Const(root.WithOpName("block_shape_op"), Input::Initializer(block_shape_data)); Tensor crops_data(DT_INT32, TensorShape({2, 2})); test::FillValues<int32>(&crops_data, {0, 0, 0, 1}); Output crops_op = Const(root.WithOpName("crops_op"), Input::Initializer(crops_data)); Output batch_to_space_op = BatchToSpaceND(root.WithOpName("batch_to_space_op"), conv_op, block_shape_op, crops_data); Tensor mean_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mean_data, {10.0f, 20.0f}); Output mean_op = Const(root.WithOpName("mean_op"), Input::Initializer(mean_data)); Tensor variance_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&variance_data, {0.25f, 0.5f}); Output variance_op = Const(root.WithOpName("variance_op"), Input::Initializer(variance_data)); Tensor beta_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&beta_data, {0.1f, 0.6f}); Output beta_op = Const(root.WithOpName("beta_op"), Input::Initializer(beta_data)); Tensor gamma_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&gamma_data, {1.0f, 2.0f}); Output gamma_op = Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data)); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); NodeDef batch_norm_node; batch_norm_node.set_op("FusedBatchNorm"); batch_norm_node.set_name("output"); AddNodeInput("batch_to_space_op", &batch_norm_node); AddNodeInput("gamma_op", &batch_norm_node); AddNodeInput("beta_op", &batch_norm_node); AddNodeInput("mean_op", &batch_norm_node); AddNodeInput("variance_op", &batch_norm_node); SetNodeAttr("T", DT_FLOAT, &batch_norm_node); SetNodeAttr("epsilon", 0.00001f, &batch_norm_node); SetNodeAttr("is_training", false, &batch_norm_node); *(original_graph_def.mutable_node()->Add()) = batch_norm_node; std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); 
std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("FusedBatchNormWithBatchToSpace", node.op()); } } TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNorms) { TestFoldOldBatchNorms(); } TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNorms) { TestFoldFusedBatchNorms(); } TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithConcat) { TestFoldFusedBatchNormsWithConcat(true); TestFoldFusedBatchNormsWithConcat(false); } TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithBatchToSpace) { TestFoldFusedBatchNormsWithBatchToSpace(); } TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNormsAfterDepthwiseConv2dNative) { TestFoldOldBatchNormsAfterDepthwiseConv2dNative(); } TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsAfterDepthwiseConv2dNative) { TestFoldFusedBatchNormsAfterDepthwiseConv2dNative(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6318f003-9cc1-4633-bfab-6c6cc68f3bfa
cpp
tensorflow/tensorflow
flatten_atrous
tensorflow/tools/graph_transforms/flatten_atrous.cc
tensorflow/tools/graph_transforms/flatten_atrous_test.cc
#include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FlattenAtrousConv(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"BatchToSpaceND", { {"Conv2D|DepthwiseConv2dNative", { {"SpaceToBatchND", { {"*"}, {"*"}, {"*"} } }, {"*"} } }, {"*"}, {"*"} } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& batch_to_space_node = match.node; const NodeDef& conv_node = match.inputs[0].node; const NodeDef& filter_node = match.inputs[0].inputs[1].node; const NodeDef& input_node = match.inputs[0].inputs[0].inputs[0].node; const NodeDef& space_to_batch_block_shape_node = match.inputs[0].inputs[0].inputs[1].node; Tensor block_shape = GetNodeTensorAttr(space_to_batch_block_shape_node, "value"); const int32_t block_height = block_shape.flat<int32>()(0); const int32_t block_width = block_shape.flat<int32>()(1); const Tensor& filter = GetNodeTensorAttr(filter_node, "value"); const int32_t filter_height = filter.dim_size(0); const int32_t filter_width = filter.dim_size(1); const int32_t in_channels = filter.dim_size(2); const int32_t out_channels = filter.dim_size(3); const int32_t upsampled_filter_height = (filter_height - 1) * block_height + 1; const int32_t upsampled_filter_width = (filter_width - 1) * block_width + 1; Tensor upsampled_filter( DT_FLOAT, TensorShape({upsampled_filter_height, upsampled_filter_width, in_channels, out_channels})); auto filter_eigen = filter.tensor<float, 4>(); auto upsampled_filter_eigen = 
upsampled_filter.tensor<float, 4>(); upsampled_filter_eigen.setZero(); for (int h = 0; h < filter_height; ++h) { for (int w = 0; w < filter_width; ++w) { for (int c_in = 0; c_in < in_channels; ++c_in) { for (int c_out = 0; c_out < out_channels; ++c_out) { upsampled_filter_eigen(block_height * h, block_width * w, c_in, c_out) = filter_eigen(h, w, c_in, c_out); } } } } NodeDef upsampled_filter_node; upsampled_filter_node.set_op("Const"); upsampled_filter_node.set_name(filter_node.name()); SetNodeAttr("dtype", DT_FLOAT, &upsampled_filter_node); SetNodeTensorAttr<float>("value", upsampled_filter, &upsampled_filter_node); NodeDef flattened_conv_node; flattened_conv_node.set_name(batch_to_space_node.name()); flattened_conv_node.set_op(conv_node.op()); flattened_conv_node.set_device(conv_node.device()); AddNodeInput(input_node.name(), &flattened_conv_node); AddNodeInput(upsampled_filter_node.name(), &flattened_conv_node); CopyNodeAttr(conv_node, "T", "T", &flattened_conv_node); CopyNodeAttr(conv_node, "strides", "strides", &flattened_conv_node); SetNodeAttr("padding", "SAME", &flattened_conv_node); CopyNodeAttr(conv_node, "data_format", "data_format", &flattened_conv_node); if (conv_node.op() == "Conv2D") { CopyNodeAttr(conv_node, "use_cudnn_on_gpu", "use_cudnn_on_gpu", &flattened_conv_node); } new_nodes->push_back(input_node); new_nodes->push_back(upsampled_filter_node); new_nodes->push_back(flattened_conv_node); return OkStatus(); }, {}, &replaced_graph_def)); *output_graph_def = replaced_graph_def; return OkStatus(); } REGISTER_GRAPH_TRANSFORM("flatten_atrous_conv", FlattenAtrousConv); } }
#include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FlattenAtrousConv(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class FlattenAtrousConvTest : public ::testing::Test { protected: template <class TConvOp> void TestFlattenAtrousConv() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 3, 3, 2})); test::FillValues<float>( &input_data, {.1f, .4f, .2f, .5f, .3f, .6f, -1.0f, -.4f, -.2f, -.5f, -.3f, -.6f, .1f, .4f, .2f, .5f, .3f, .6f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor block_shape_data(DT_INT32, TensorShape({2})); test::FillValues<int>(&block_shape_data, {2, 2}); Output block_shape_op = Const(root.WithOpName("block_shape_op"), Input::Initializer(block_shape_data)); Tensor paddings_data(DT_INT32, TensorShape({2, 2})); test::FillValues<int>(&paddings_data, {1, 2, 1, 2}); Output paddings_op = Const(root.WithOpName("paddings_op"), Input::Initializer(paddings_data)); Output space_to_batch_op = SpaceToBatchND(root.WithOpName("space_to_batch_op"), input_op, block_shape_op, paddings_op); Tensor weights_data(DT_FLOAT, TensorShape({2, 2, 2, 1})); test::FillValues<float>(&weights_data, {.1f, .2f, .3f, .4f, .1f, .2f, .3f, .4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = TConvOp(root.WithOpName("conv_op"), 
space_to_batch_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor crops_data(DT_INT32, TensorShape({2, 2})); test::FillValues<int>(&crops_data, {0, 1, 0, 1}); Output crops_op = Const(root.WithOpName("crops_op"), Input::Initializer(crops_data)); Output batch_to_space_op = BatchToSpaceND( root.WithOpName("output"), conv_op, block_shape_op, crops_op); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef modified_graph_def; TF_ASSERT_OK(FlattenAtrousConv(original_graph_def, {{}, {"output"}}, &modified_graph_def)); std::unique_ptr<Session> modified_session(NewSession(SessionOptions())); TF_ASSERT_OK(modified_session->Create(modified_graph_def)); std::vector<Tensor> modified_outputs; TF_ASSERT_OK(modified_session->Run({}, {"output"}, {}, &modified_outputs)); EXPECT_EQ(3, modified_graph_def.node_size()); EXPECT_EQ("input_op", modified_graph_def.node(0).name()); EXPECT_EQ("weights_op", modified_graph_def.node(1).name()); EXPECT_EQ("output", modified_graph_def.node(2).name()); EXPECT_EQ("Const", modified_graph_def.node(0).op()); EXPECT_EQ("Const", modified_graph_def.node(1).op()); EXPECT_EQ(conv_op.node()->type_string(), modified_graph_def.node(2).op()); test::ExpectTensorNear<float>(original_outputs[0], modified_outputs[0], 1e-6); } }; TEST_F(FlattenAtrousConvTest, TestFlattenAtrousConv2D) { TestFlattenAtrousConv<::tensorflow::ops::Conv2D>(); } TEST_F(FlattenAtrousConvTest, TestFlattenAtrousDepthwiseConv2dNative) { TestFlattenAtrousConv<::tensorflow::ops::DepthwiseConv2dNative>(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/flatten_atrous.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/flatten_atrous_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
db16404e-1780-48da-81e8-d633f74808dd
cpp
tensorflow/tensorflow
remove_device
tensorflow/tools/graph_transforms/remove_device.cc
tensorflow/tools/graph_transforms/remove_device_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveDevice(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; new_node->set_device(""); } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("remove_device", RemoveDevice); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveDevice(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RemoveDeviceTest : public ::testing::Test { protected: void TestRemoveDevice() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->set_device(" mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); add_node2->set_device(" NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; 
TF_ASSERT_OK(RemoveDevice(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ("", node_lookup.at("mul_node1")->device()); EXPECT_EQ("", node_lookup.at("add_node2")->device()); } }; TEST_F(RemoveDeviceTest, TestRemoveDevice) { TestRemoveDevice(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_device.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_device_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4d7053c1-381d-4461-8679-34c486c3ab16
cpp
tensorflow/tensorflow
transform_graph
tensorflow/tools/graph_transforms/transform_graph.cc
tensorflow/tools/graph_transforms/transform_graph_test.cc
#include "tensorflow/tools/graph_transforms/transform_graph.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/lib/strings/scanner.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/command_line_flags.h" #include "tensorflow/tools/graph_transforms/file_utils.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" #if !defined(PLATFORM_WINDOWS) #include <pwd.h> #include <unistd.h> #endif namespace tensorflow { namespace graph_transforms { using tensorflow::strings::Scanner; Status ParseTransformParameters(const string& transforms_string, TransformParameters* params_list) { params_list->clear(); enum { TRANSFORM_NAME, TRANSFORM_PARAM_NAME, TRANSFORM_PARAM_VALUE, } state = TRANSFORM_NAME; StringPiece remaining(transforms_string); StringPiece match; StringPiece transform_name; StringPiece parameter_name; StringPiece parameter_value; TransformFuncParameters func_parameters; while (!remaining.empty()) { if (state == TRANSFORM_NAME) { func_parameters.clear(); Scanner(remaining).AnySpace().GetResult(&remaining, &match); if (remaining.empty()) { return OkStatus(); } const bool found_transform_name = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &transform_name); if (!found_transform_name) { return errors::InvalidArgument("Looking for transform name, but found ", string(remaining).c_str()); } if (Scanner(remaining).OneLiteral("(").GetResult(&remaining, &match)) { state = TRANSFORM_PARAM_NAME; } else { params_list->push_back({string(transform_name), func_parameters}); transform_name = ""; state = TRANSFORM_NAME; } } else if (state == TRANSFORM_PARAM_NAME) { if (Scanner(remaining).OneLiteral(")").GetResult(&remaining, &match)) { params_list->push_back({string(transform_name), func_parameters}); transform_name = ""; state = 
TRANSFORM_NAME; } else { Scanner(remaining).ZeroOrOneLiteral(",").GetResult(&remaining, &match); Scanner(remaining).AnySpace().GetResult(&remaining, &match); const bool found_parameter_name = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_UNDERSCORE) .GetResult(&remaining, &parameter_name); if (!found_parameter_name) { return errors::InvalidArgument( "Looking for parameter name, but found ", string(remaining).c_str()); } if (Scanner(remaining).OneLiteral("=").GetResult(&remaining, &match)) { state = TRANSFORM_PARAM_VALUE; } else { return errors::InvalidArgument("Looking for =, but found ", string(remaining).c_str()); } } } else if (state == TRANSFORM_PARAM_VALUE) { bool found_parameter_value; if (Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match)) { found_parameter_value = Scanner(remaining).ScanEscapedUntil('"').GetResult( &remaining, &parameter_value); if (found_parameter_value) { Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match); } } else { found_parameter_value = Scanner(remaining) .Many(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE) .GetResult(&remaining, &parameter_value); } if (!found_parameter_value) { return errors::InvalidArgument("Looking for parameter name, but found ", string(remaining).c_str()); } func_parameters[string(parameter_name)].emplace_back(parameter_value); Scanner(remaining).ZeroOrOneLiteral("\"").GetResult(&remaining, &match); Scanner(remaining).ZeroOrOneLiteral("'").GetResult(&remaining, &match); state = TRANSFORM_PARAM_NAME; } } return OkStatus(); } std::string ExpandPath(const std::string& path_string) { #if defined(PLATFORM_WINDOWS) return path_string; #else if (path_string.empty() || path_string[0] != '~') { return path_string; } const char* home = nullptr; std::string::size_type prefix = path_string.find_first_of('/'); if (path_string.length() == 1 || prefix == 1) { home = getenv("HOME"); if (!home) { struct passwd* pw = getpwuid(getuid()); if (pw) { home = pw->pw_dir; } } } else { std::string 
user(path_string, 1, (prefix == std::string::npos) ? std::string::npos : prefix - 1); struct passwd* pw = getpwnam(user.c_str()); if (pw) { home = pw->pw_dir; } } if (!home) { return path_string; } string path(home); if (prefix == std::string::npos) { return path; } if (path.length() == 0 || path[path.length() - 1] != '/') { path += '/'; } path += path_string.substr(prefix + 1); return path; #endif } int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main) { string in_graph_string = ""; string out_graph_string = ""; string inputs_string = ""; string outputs_string = ""; string transforms_string = ""; bool output_as_text = false; std::vector<Flag> flag_list = { Flag("in_graph", &in_graph_string, "input graph file name"), Flag("out_graph", &out_graph_string, "output graph file name"), Flag("inputs", &inputs_string, "inputs"), Flag("outputs", &outputs_string, "outputs"), Flag("transforms", &transforms_string, "list of transforms"), Flag("output_as_text", &output_as_text, "whether to write the graph in text protobuf format"), }; string usage = Flags::Usage(argv[0], flag_list); usage += "\nTransforms are:\n"; TransformRegistry* transform_registry = GetTransformRegistry(); for (const auto& pair : *transform_registry) { usage += pair.first + "\n"; } const bool parse_result = Flags::Parse(&argc, argv, flag_list); if (init_main) { port::InitMain(argv[0], &argc, &argv); } if (!parse_result) { LOG(ERROR) << usage; return -1; } if (argc > 1) { LOG(ERROR) << "Unknown argument " << argv[1] << ".\n" << usage; return -1; } if (in_graph_string.empty()) { LOG(ERROR) << "in_graph graph can't be empty.\n" << usage; return -1; } if (out_graph_string.empty()) { LOG(ERROR) << "out_graph graph can't be empty.\n" << usage; return -1; } if (transforms_string.empty()) { LOG(ERROR) << "You must specify at least one transform.\n" << usage; return -1; } string in_graph = ExpandPath(in_graph_string); string out_graph = ExpandPath(out_graph_string); std::vector<string> inputs = 
str_util::Split(inputs_string, ','); std::vector<string> outputs = str_util::Split(outputs_string, ','); TransformParameters transform_params; Status parse_status = ParseTransformParameters(transforms_string, &transform_params); if (!parse_status.ok()) { LOG(ERROR) << "Failed to parse --transform argument, error was " << parse_status.message(); return -1; } if (transform_params.empty()) { LOG(ERROR) << "You must specify at least one transform.\n" << usage; return -1; } GraphDef graph_def; Status load_status = LoadTextOrBinaryGraphFile(in_graph, &graph_def); if (!load_status.ok()) { LOG(ERROR) << "Loading graph '" << in_graph_string << "' failed with " << load_status.message(); LOG(ERROR) << usage; return -1; } Status transform_result = TransformGraph(inputs, outputs, transform_params, &graph_def); if (!transform_result.ok()) { LOG(ERROR) << transform_result.message(); LOG(ERROR) << usage; return -1; } Status save_status; if (output_as_text) { save_status = WriteTextProto(Env::Default(), out_graph, graph_def); } else { save_status = WriteBinaryProto(Env::Default(), out_graph, graph_def); } if (!save_status.ok()) { LOG(ERROR) << "Saving graph '" << out_graph_string << "' failed with " << save_status.message(); return -1; } return 0; } Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params, bool* ignore_errors) { *ignore_errors = false; if (transform_params.count("ignore_errors") && (!transform_params.at("ignore_errors").empty())) { const string& ignore_errors_string = absl::AsciiStrToLower(transform_params.at("ignore_errors").at(0)); if (ignore_errors_string == "true") { *ignore_errors = true; } else if (ignore_errors_string == "false") { *ignore_errors = false; } else { return errors::InvalidArgument( "ignore_errors should be true or false, found ", ignore_errors_string); } } return OkStatus(); } Status TransformGraph(const std::vector<string>& inputs, const std::vector<string>& outputs, const TransformParameters& transform_params, GraphDef* 
graph_def) { TransformRegistry* transform_registry = GetTransformRegistry(); for (const auto& transform_info : transform_params) { const string& transform_name = transform_info.first; if (transform_name.empty()) { continue; } if (!transform_registry->count(transform_name)) { return errors::InvalidArgument("Transform '", transform_name, "' not recognized."); } LOG(INFO) << "Applying " << transform_name; const TransformFunc& transform_func = transform_registry->at(transform_name); TransformFuncContext context; context.input_names = inputs; context.output_names = outputs; context.params = transform_info.second; bool ignore_errors; TF_RETURN_IF_ERROR( ShouldIgnoreErrors(transform_info.second, &ignore_errors)); GraphDef transformed_graph_def; Status transform_result = transform_func(*graph_def, context, &transformed_graph_def); if (!transform_result.ok()) { if (ignore_errors) { LOG(ERROR) << transform_name << ": Ignoring error " << transform_result.message(); transformed_graph_def = *graph_def; } else { return transform_result; } } *transformed_graph_def.mutable_library() = graph_def->library(); TF_RETURN_IF_ERROR(IsGraphValid(transformed_graph_def)); *graph_def = transformed_graph_def; } return OkStatus(); } } }
#include "tensorflow/tools/graph_transforms/transform_graph.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params, bool* ignore_errors); namespace { Status test_empty_graph_transform(const GraphDef& graph_def, const TransformFuncContext& context, GraphDef* result) { result->Clear(); return OkStatus(); } } REGISTER_GRAPH_TRANSFORM("test_empty_graph_transform", test_empty_graph_transform); class TransformGraphTest : public ::testing::Test { protected: void TestConstantFolding() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 100; Tensor a_data(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_data, 1.0f); Output a_const = Const(root.WithOpName("a_expect_removed"), Input::Initializer(a_data)); Tensor b_data(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_data, 1.0f); Output b_const = Const(root.WithOpName("b_expect_removed"), Input::Initializer(b_data)); Output add = Add(root.WithOpName("add_expect_removed"), a_const, b_const); Output placeholder = Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT); Output mul = Mul(root.WithOpName("output_expect_remains"), add, placeholder); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); string graph_def_serialized; 
graph_def.SerializeToString(&graph_def_serialized); const string dir = testing::TmpDir(); const string in_filename_pb = io::JoinPath(dir, "in_graphdef.pb"); const string out_filename_pb = io::JoinPath(dir, "out_graphdef.pb"); TF_ASSERT_OK(WriteStringToFile(Env::Default(), in_filename_pb, graph_def_serialized)); std::vector<string> args = {"some_binary", "--in_graph=" + in_filename_pb, "--out_graph=" + out_filename_pb, "--inputs=placeholder_expect_remains", "--outputs=output_expect_remains", "--transforms=fold_constants"}; const int argc = 6; EXPECT_EQ(argc, args.size()); char* argv[argc]; std::vector<char*> char_strings; for (int i = 0; i < argc; ++i) { string arg = args[i]; char* char_string = new char[arg.size() + 1]; std::copy_n(arg.c_str(), arg.size() + 1, char_string); argv[i] = char_string; char_strings.push_back(char_string); } ParseFlagsAndTransformGraph(argc, argv, false); for (char* char_string : char_strings) { delete[] char_string; } GraphDef out_graph_def; TF_EXPECT_OK( ReadBinaryProto(Env::Default(), out_filename_pb, &out_graph_def)); std::map<string, const NodeDef*> out_node_map; graph_transforms::MapNamesToNodes(out_graph_def, &out_node_map); for (const NodeDef& node : out_graph_def.node()) { const int occurrence_count = out_node_map.count(node.name()); if (str_util::EndsWith(node.name(), "expect_removed")) { EXPECT_EQ(0, occurrence_count) << "node.name()=" << node.name(); } if (str_util::EndsWith(node.name(), "expect_remains")) { EXPECT_EQ(1, occurrence_count) << "node.name()=" << node.name(); } } } void TestTransformRegistration() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Output placeholder = Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); EXPECT_EQ(1, graph_def.node().size()); TF_ASSERT_OK(TransformGraph({}, {}, {{"test_empty_graph_transform", {}}}, &graph_def)); EXPECT_EQ(0, graph_def.node().size()); 
TF_ASSERT_OK(root.ToGraphDef(&graph_def)); Status no_such_status = TransformGraph({}, {}, {{"test_no_such_transform", {}}}, &graph_def); EXPECT_TRUE(absl::StrContains(no_such_status.ToString(), "not recognized")); } void TestParseTransformParameters() { TransformParameters params_list; TF_EXPECT_OK(ParseTransformParameters("foo", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); TF_EXPECT_OK(ParseTransformParameters("foo bar", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); TF_EXPECT_OK(ParseTransformParameters("foo() bar()", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); TF_EXPECT_OK( ParseTransformParameters("foo(bob_something=sue)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_EQ(1, params_list[0].second.count("bob_something")); EXPECT_EQ(1, params_list[0].second["bob_something"].size()); EXPECT_EQ("sue", params_list[0].second["bob_something"][0]); TF_EXPECT_OK(ParseTransformParameters("bar(a=1, b=2, a=3)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("bar", params_list[0].first); EXPECT_EQ(1, params_list[0].second.count("a")); EXPECT_EQ(2, params_list[0].second["a"].size()); EXPECT_EQ("1", params_list[0].second["a"][0]); EXPECT_EQ("3", params_list[0].second["a"][1]); EXPECT_EQ(1, params_list[0].second.count("b")); EXPECT_EQ(1, params_list[0].second["b"].size()); EXPECT_EQ("2", params_list[0].second["b"][0]); TF_EXPECT_OK(ParseTransformParameters("bar(a=\"1\", b=\"1,2,3\", a=3)", &params_list)); EXPECT_EQ(1, params_list.size()); EXPECT_EQ("bar", params_list[0].first); 
EXPECT_EQ(1, params_list[0].second.count("a")); EXPECT_EQ(2, params_list[0].second["a"].size()); EXPECT_EQ("1", params_list[0].second["a"][0]); EXPECT_EQ("3", params_list[0].second["a"][1]); EXPECT_EQ(1, params_list[0].second.count("b")); EXPECT_EQ(1, params_list[0].second["b"].size()); EXPECT_EQ("1,2,3", params_list[0].second["b"][0]); } void TestParseEscapedNewline() { TransformParameters params_list; ParseTransformParameters("\\\n", &params_list).IgnoreError(); EXPECT_EQ(0, params_list.size()); } void TestParseExtraSpaces() { TransformParameters params_list; ParseTransformParameters(" ", &params_list).IgnoreError(); EXPECT_EQ(0, params_list.size()); TF_EXPECT_OK(ParseTransformParameters(" foo bar \\\n", &params_list)); EXPECT_EQ(2, params_list.size()); EXPECT_EQ("foo", params_list[0].first); EXPECT_TRUE(params_list[0].second.empty()); EXPECT_EQ("bar", params_list[1].first); EXPECT_TRUE(params_list[1].second.empty()); } void TestShouldIgnoreErrors() { bool ignore_errors; TF_EXPECT_OK( ShouldIgnoreErrors({{"ignore_errors", {"true"}}}, &ignore_errors)); EXPECT_TRUE(ignore_errors); TF_EXPECT_OK( ShouldIgnoreErrors({{"ignore_errors", {"false"}}}, &ignore_errors)); EXPECT_FALSE(ignore_errors); TF_EXPECT_OK(ShouldIgnoreErrors({}, &ignore_errors)); EXPECT_FALSE(ignore_errors); EXPECT_FALSE( ShouldIgnoreErrors({{"ignore_errors", {"foo"}}}, &ignore_errors).ok()); } }; TEST_F(TransformGraphTest, TestConstantFolding) { TestConstantFolding(); } TEST_F(TransformGraphTest, TestTransformRegistration) { TestTransformRegistration(); } TEST_F(TransformGraphTest, TestParseTransformParameters) { TestParseTransformParameters(); } TEST_F(TransformGraphTest, TestParseEscapedNewline) { TestParseEscapedNewline(); } TEST_F(TransformGraphTest, TestShouldIgnoreErrors) { TestShouldIgnoreErrors(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_graph.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_graph_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ac9824f0-e411-4d08-b0c7-6186c95f4a4c
cpp
tensorflow/tensorflow
obfuscate_names
tensorflow/tools/graph_transforms/obfuscate_names.cc
tensorflow/tools/graph_transforms/obfuscate_names_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status ObfuscateNames(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::unordered_set<string> required_nodes; for (const string& input : context.input_names) { required_nodes.insert(input); } for (const string& output : context.output_names) { required_nodes.insert(output); } const string valid_chars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; const int64_t chars_size = valid_chars.size(); std::map<string, string> new_names; int64_t name_index = 0; for (const NodeDef& input_node : input_graph_def.node()) { const string& old_name = input_node.name(); string new_name; if (required_nodes.count(old_name)) { new_name = old_name; } else { do { int64_t remaining = name_index; new_name = ""; while (true) { const int64_t remainder = (remaining % chars_size); const char current_char = valid_chars[remainder]; new_name = current_char + new_name; remaining /= chars_size; if (remaining <= 0) { break; } } ++name_index; } while (required_nodes.count(new_name)); } new_names[old_name] = new_name; } output_graph_def->Clear(); for (const NodeDef& input_node : input_graph_def.node()) { NodeDef* node = output_graph_def->mutable_node()->Add(); *node = input_node; const string& old_name = input_node.name(); node->set_name(new_names[old_name]); node->mutable_input()->Clear(); for (const string& input_name : input_node.input()) { string prefix; string input_node_name; string suffix; NodeNamePartsFromInput(input_name, &prefix, 
&input_node_name, &suffix); if (new_names.count(input_node_name) == 0) { return errors::InvalidArgument("No node named ", input_node_name, " for input to ", old_name); } string new_input_name = prefix + new_names[input_node_name] + suffix; *(node->mutable_input()->Add()) = new_input_name; } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("obfuscate_names", ObfuscateNames); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status ObfuscateNames(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class ObfuscateNamesTest : public ::testing::Test { protected: void TestSimpleTree() { GraphDef graph_def; NodeDef* add_node1 = graph_def.add_node(); add_node1->set_name("add_node1"); add_node1->set_op("Add"); add_node1->add_input("add_node2"); add_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node3"); add_node3->add_input("const_node4"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* const_node4 = graph_def.add_node(); const_node4->set_name("const_node4"); const_node4->set_op("Const"); GraphDef result; TF_ASSERT_OK( ObfuscateNames(graph_def, {{"const_node1"}, {"add_node1"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, 
node_lookup.count("add_node1")); EXPECT_EQ(0, node_lookup.count("add_node2")); EXPECT_EQ(0, node_lookup.count("add_node3")); EXPECT_EQ(1, node_lookup.count("const_node1")); EXPECT_EQ(0, node_lookup.count("const_node2")); EXPECT_EQ(0, node_lookup.count("const_node3")); EXPECT_EQ(0, node_lookup.count("const_node4")); } void TestManyNodes() { GraphDef graph_def; for (int i = 0; i < 1000; ++i) { NodeDef* const_node = graph_def.add_node(); const_node->set_name(strings::StrCat("const_node", i)); const_node->set_op("Const"); } GraphDef result; TF_ASSERT_OK(ObfuscateNames(graph_def, {{"const_node0"}, {"const_node999"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("const_node0")); EXPECT_EQ(0, node_lookup.count("const_node500")); EXPECT_EQ(1, node_lookup.count("const_node999")); } void TestNameClashes() { GraphDef graph_def; for (int i = 0; i < 1000; ++i) { NodeDef* const_node = graph_def.add_node(); const_node->set_name(strings::StrCat("1", i)); const_node->set_op("Const"); } GraphDef result; TF_ASSERT_OK(ObfuscateNames(graph_def, {{"10"}, {"19"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("10")); EXPECT_EQ(1, node_lookup.count("19")); std::unordered_set<string> names; for (const NodeDef& node : result.node()) { EXPECT_EQ(0, names.count(node.name())) << "Found multiple nodes with name '" << node.name() << "'"; names.insert(node.name()); } } }; TEST_F(ObfuscateNamesTest, TestSimpleTree) { TestSimpleTree(); } TEST_F(ObfuscateNamesTest, TestManyNodes) { TestManyNodes(); } TEST_F(ObfuscateNamesTest, TestNameClashes) { TestNameClashes(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/obfuscate_names.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/obfuscate_names_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e49e8816-3d1e-41ac-87bf-ca044879aeaf
cpp
tensorflow/tensorflow
fold_batch_norms
tensorflow/tools/graph_transforms/fold_batch_norms.cc
tensorflow/tools/graph_transforms/fold_batch_norms_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FoldBatchNorms(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Mul", { {"Conv2D|MatMul|DepthwiseConv2dNative", { {"*"}, {"Const"}, } }, {"Const"}, } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& mul_node = match.node; const NodeDef& conv_node = match.inputs[0].node; const NodeDef& input_node = match.inputs[0].inputs[0].node; const NodeDef& weights_node = match.inputs[0].inputs[1].node; const NodeDef& mul_values_node = match.inputs[1].node; for (const auto& node : {conv_node, weights_node, mul_values_node}) { if (output_nodes.count(node.name())) { new_nodes->insert(new_nodes->end(), {mul_node, conv_node, input_node, weights_node, mul_values_node}); return OkStatus(); } } Tensor weights = GetNodeTensorAttr(weights_node, "value"); Tensor mul_values = GetNodeTensorAttr(mul_values_node, "value"); int64_t weights_cols; if (conv_node.op() == "Conv2D") { weights_cols = weights.shape().dim_size(3); } else if (conv_node.op() == "DepthwiseConv2dNative") { weights_cols = weights.shape().dim_size(2) * weights.shape().dim_size(3); } else { weights_cols = weights.shape().dim_size(1); } if ((mul_values.shape().dims() != 1) || (mul_values.shape().dim_size(0) != weights_cols)) { return errors::InvalidArgument( "Mul constant input to batch norm has 
bad shape: ", mul_values.shape().DebugString()); } auto weights_vector = weights.flat<float>(); Tensor scaled_weights(DT_FLOAT, weights.shape()); auto scaled_weights_vector = scaled_weights.flat<float>(); for (int64_t row = 0; row < weights_vector.dimension(0); ++row) { scaled_weights_vector(row) = weights_vector(row) * mul_values.flat<float>()(row % weights_cols); } NodeDef scaled_weights_node; scaled_weights_node.set_op("Const"); scaled_weights_node.set_name(weights_node.name()); SetNodeAttr("dtype", DT_FLOAT, &scaled_weights_node); SetNodeTensorAttr<float>("value", scaled_weights, &scaled_weights_node); new_nodes->push_back(scaled_weights_node); new_nodes->push_back(input_node); NodeDef new_conv_node; new_conv_node = conv_node; new_conv_node.set_name(mul_node.name()); new_nodes->push_back(new_conv_node); return OkStatus(); }, {}, &replaced_graph_def)); *output_graph_def = replaced_graph_def; return OkStatus(); } REGISTER_GRAPH_TRANSFORM("fold_batch_norms", FoldBatchNorms); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status FoldBatchNorms(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class FoldBatchNormsTest : public ::testing::Test { protected: void TestFoldBatchNormsConv2D() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mul_values_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mul_values_data, {2.0f, 3.0f}); Output mul_values_op = Const(root.WithOpName("mul_values"), Input::Initializer(mul_values_data)); Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); 
TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK( FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("Mul", node.op()); } } void TestFoldBatchNormsDepthwiseConv2dNative() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mul_values_data(DT_FLOAT, TensorShape({4})); test::FillValues<float>(&mul_values_data, {2.0f, 3.0f, 4.0f, 5.0f}); Output mul_values_op = Const(root.WithOpName("mul_values"), Input::Initializer(mul_values_data)); Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; 
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK( FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("Mul", node.op()); } } void TestFoldBatchNormsConv2DShared() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); Tensor mul_values_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mul_values_data, {2.0f, 3.0f}); Output mul_values_op = Const(root.WithOpName("mul_values"), Input::Initializer(mul_values_data)); Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op); Tensor mul_values_data_2(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mul_values_data_2, {1.0f, 2.0f}); Output mul_values_op_2 = Const(root.WithOpName("mul_values_2"), Input::Initializer(mul_values_data)); Output mul_op_2 = Mul(root.WithOpName("output_2"), conv_op, mul_values_op_2); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> 
original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output", "output_2"}, {}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK(FoldBatchNorms( original_graph_def, {{}, {"output", "output_2"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK( fused_session->Run({}, {"output", "output_2"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); test::ExpectTensorNear<float>(original_outputs[1], fused_outputs[1], 1e-5); } void TestFoldBatchNormsMatMul() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({2, 2})); test::FillValues<float>(&weights_data, {1.0f, 2.0f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output matmul_op = MatMul(root.WithOpName("matmul_op"), input_op, weights_op); Tensor mul_values_data(DT_FLOAT, TensorShape({2})); test::FillValues<float>(&mul_values_data, {2.0f, 3.0f}); Output mul_values_op = Const(root.WithOpName("mul_values"), Input::Initializer(mul_values_data)); Output mul_op = Mul(root.WithOpName("output"), matmul_op, mul_values_op); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, 
{}, &original_outputs)); GraphDef fused_graph_def; TF_ASSERT_OK( FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def)); std::unique_ptr<Session> fused_session(NewSession(SessionOptions())); TF_ASSERT_OK(fused_session->Create(fused_graph_def)); std::vector<Tensor> fused_outputs; TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs)); test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5); for (const NodeDef& node : fused_graph_def.node()) { EXPECT_NE("Mul", node.op()); } } }; TEST_F(FoldBatchNormsTest, TestFoldBatchNormsConv2D) { TestFoldBatchNormsConv2D(); } TEST_F(FoldBatchNormsTest, TestFoldBatchNormsMatMul) { TestFoldBatchNormsMatMul(); } TEST_F(FoldBatchNormsTest, TestFoldBatchNormsDepthwiseConv2dNative) { TestFoldBatchNormsDepthwiseConv2dNative(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_batch_norms.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_batch_norms_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
727970ad-3063-4030-bfab-9ed7b4be0677
cpp
tensorflow/tensorflow
backports
tensorflow/tools/graph_transforms/backports.cc
tensorflow/tools/graph_transforms/backports_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status BackportConcatV2Transform(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"ConcatV2"}, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& concat_v2_node = match.node; NodeDef concat_node = concat_v2_node; concat_node.set_op("Concat"); concat_node.mutable_input()->Clear(); const string& dim_input = concat_v2_node.input(concat_v2_node.input_size() - 1); concat_node.add_input(dim_input); for (int i = 0; i < (concat_v2_node.input_size() - 1); ++i) { concat_node.add_input(concat_v2_node.input(i)); } concat_node.mutable_attr()->erase("Tidx"); new_nodes->push_back(concat_node); return OkStatus(); }, {true}, output_graph_def)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("backport_concatv2", BackportConcatV2Transform); Status BackportTensorArrayV3Transform(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::map<string, string> inputs_to_rename; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"TensorArrayV3|TensorArrayGradV3"}, [&inputs_to_rename](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& tensor_array_v3_node = match.node; NodeDef tensor_array_v2_node = tensor_array_v3_node; if 
(tensor_array_v3_node.op() == "TensorArrayV3") { tensor_array_v2_node.set_op("TensorArrayV2"); } else { tensor_array_v2_node.set_op("TensorArrayGradV2"); } NodeDef replacement_flow_node; replacement_flow_node.set_op("Const"); SetNodeAttr("dtype", DT_FLOAT, &replacement_flow_node); replacement_flow_node.set_name(tensor_array_v3_node.name() + "/replacement_flow_node"); Tensor replacement_flow_tensor(DT_FLOAT, {}); replacement_flow_tensor.flat<float>()(0) = 1.0f; SetNodeTensorAttr<float>("value", replacement_flow_tensor, &replacement_flow_node); inputs_to_rename[tensor_array_v3_node.name() + ":1"] = replacement_flow_node.name(); new_nodes->push_back(tensor_array_v2_node); new_nodes->push_back(replacement_flow_node); return OkStatus(); }, {true}, &replaced_graph_def)); GraphDef renamed_graph_def; TF_RETURN_IF_ERROR(RenameNodeInputs(replaced_graph_def, inputs_to_rename, std::unordered_set<string>(), &renamed_graph_def)); TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( renamed_graph_def, {"TensorArrayWriteV3|TensorArrayReadV3|TensorArrayGatherV3|" "TensorArrayScatterV3|TensorArrayConcatV3|TensorArraySplitV3|" "TensorArraySizeV3|TensorArrayCloseV3"}, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& v3_node = match.node; NodeDef v2_node = v3_node; v2_node.set_op(v3_node.op().substr(0, v3_node.op().size() - 1) + "2"); new_nodes->push_back(v2_node); return OkStatus(); }, {true}, output_graph_def)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("backport_tensor_array_v3", BackportTensorArrayV3Transform); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status BackportConcatV2Transform(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status BackportTensorArrayV3Transform(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class BackportConcatV2Test : public ::testing::Test { protected: void TestBackportConcatV2() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* concat_node = graph_def.add_node(); concat_node->set_name("concat_node"); concat_node->set_op("ConcatV2"); 
concat_node->add_input("const_node1"); concat_node->add_input("const_node2"); concat_node->add_input("const_node3"); SetNodeAttr("Tidx", DT_INT32, concat_node); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"concat_node"}; TF_ASSERT_OK(BackportConcatV2Transform(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("concat_node")); EXPECT_EQ("Concat", node_lookup.at("concat_node")->op()); EXPECT_EQ(0, node_lookup.at("concat_node")->attr().count("Tidx")); EXPECT_EQ("const_node3", node_lookup.at("concat_node")->input(0)); EXPECT_EQ("const_node1", node_lookup.at("concat_node")->input(1)); EXPECT_EQ("const_node2", node_lookup.at("concat_node")->input(2)); EXPECT_EQ(1, node_lookup.count("const_node1")); EXPECT_EQ("Const", node_lookup.at("const_node1")->op()); EXPECT_EQ(1, node_lookup.count("const_node2")); EXPECT_EQ("Const", node_lookup.at("const_node2")->op()); EXPECT_EQ(1, node_lookup.count("const_node3")); EXPECT_EQ("Const", node_lookup.at("const_node3")->op()); } }; TEST_F(BackportConcatV2Test, TestBackportConcatV2) { TestBackportConcatV2(); } TEST(BackportTensorArrayV3Test, TestBackportTensorArrayV3) { GraphDef graph_def; NodeDef* size_node = graph_def.add_node(); size_node->set_name("size_node"); size_node->set_op("Const"); Tensor size_tensor(DT_INT32, {}); size_tensor.flat<int32>()(0) = 1; SetNodeTensorAttr<float>("value", size_tensor, size_node); NodeDef* tensor_array_node = graph_def.add_node(); tensor_array_node->set_name("tensor_array_node"); tensor_array_node->set_op("TensorArrayV3"); tensor_array_node->add_input("size_node"); SetNodeAttr("dtype", DT_FLOAT, tensor_array_node); SetNodeAttr("element_shape", TensorShape({1, 2}), tensor_array_node); SetNodeAttr("dynamic_size", false, tensor_array_node); SetNodeAttr("clear_after_read", true, tensor_array_node); SetNodeAttr("tensor_array_name", "some_name", 
tensor_array_node); NodeDef* handle_output_node = graph_def.add_node(); handle_output_node->set_name("handle_output_node"); handle_output_node->set_op("Identity"); handle_output_node->add_input("tensor_array_node:0"); NodeDef* flow_output_node = graph_def.add_node(); flow_output_node->set_name("flow_output_node"); flow_output_node->set_op("Identity"); flow_output_node->add_input("tensor_array_node:1"); NodeDef* tensor_array_grad_node = graph_def.add_node(); tensor_array_grad_node->set_name("tensor_array_grad_node"); tensor_array_grad_node->set_op("TensorArrayGradV3"); tensor_array_grad_node->add_input("tensor_array_node:0"); tensor_array_grad_node->add_input("tensor_array_node:1"); SetNodeAttr("source", "foo", tensor_array_grad_node); NodeDef* grad_handle_output_node = graph_def.add_node(); grad_handle_output_node->set_name("grad_handle_output_node"); grad_handle_output_node->set_op("Identity"); grad_handle_output_node->add_input("tensor_array_grad_node:0"); NodeDef* grad_flow_output_node = graph_def.add_node(); grad_flow_output_node->set_name("grad_flow_output_node"); grad_flow_output_node->set_op("Identity"); grad_flow_output_node->add_input("tensor_array_grad_node:1"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"handle_output_node", "grad_handle_output_node"}; TF_ASSERT_OK(BackportTensorArrayV3Transform(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); ASSERT_EQ(1, node_lookup.count("tensor_array_node")); EXPECT_EQ("TensorArrayV2", node_lookup.at("tensor_array_node")->op()); EXPECT_EQ("TensorArrayGradV2", node_lookup.at("tensor_array_grad_node")->op()); for (const NodeDef& node : result.node()) { for (const string& input : node.input()) { EXPECT_NE("tensor_array_node:1", input); } } } TEST(BackportTensorArrayV3Test, TestBackportTensorArrayV3Subtypes) { const std::vector<string> v3_ops = { "TensorArrayWriteV3", "TensorArrayReadV3", 
"TensorArrayGatherV3", "TensorArrayScatterV3", "TensorArrayConcatV3", "TensorArraySplitV3", "TensorArraySizeV3", "TensorArrayCloseV3"}; for (const string& v3_op : v3_ops) { GraphDef graph_def; NodeDef* v3_node = graph_def.add_node(); v3_node->set_name("v3_node"); v3_node->set_op(v3_op); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {""}; TF_ASSERT_OK(BackportTensorArrayV3Transform(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); ASSERT_EQ(1, node_lookup.count("v3_node")); EXPECT_TRUE(absl::EndsWith(node_lookup.at("v3_node")->op(), "V2")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/backports.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/backports_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d0a03331-2a39-4181-8b06-23dbe6434c23
cpp
tensorflow/tensorflow
file_utils
tensorflow/core/data/service/snapshot/file_utils.cc
tensorflow/core/data/service/snapshot/file_utils_test.cc
#include "tensorflow/core/data/service/snapshot/file_utils.h" #include <cstdint> #include <string> #include <utility> #include <vector> #include "absl/functional/function_ref.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/tsl/protobuf/status.pb.h" #include "tensorflow/core/data/service/snapshot/path_utils.h" #include "tensorflow/core/data/snapshot_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/tensor.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/random.h" #include "tsl/platform/status_to_from_proto.h" namespace tensorflow { namespace data { namespace { constexpr const char kTempFileSuffix[] = ".tmp"; absl::Status AtomicallyWrite( absl::string_view filename, tsl::Env* env, absl::FunctionRef<absl::Status(const std::string&)> nonatomically_write) { std::string uncommitted_filename = absl::StrCat(filename, "__"); if (!env->CreateUniqueFileName(&uncommitted_filename, kTempFileSuffix)) { return tsl::errors::Internal("Failed to write file ", filename, ": Unable to create temporary files."); } TF_RETURN_IF_ERROR(nonatomically_write(uncommitted_filename)); absl::Status status = env->RenameFile(uncommitted_filename, std::string(filename)); if (!status.ok()) { return tsl::errors::Internal("Failed to rename file: ", status.ToString(), ". 
Source: ", uncommitted_filename, ", destination: ", filename); } return status; } } absl::Status AtomicallyWriteStringToFile(absl::string_view filename, absl::string_view str, tsl::Env* env) { auto nonatomically_write = [&](const std::string& uncommitted_filename) { TF_RETURN_IF_ERROR(WriteStringToFile(env, uncommitted_filename, str)); return absl::OkStatus(); }; TF_RETURN_WITH_CONTEXT_IF_ERROR( AtomicallyWrite(filename, env, nonatomically_write), "Requested to write string: ", str); return absl::OkStatus(); } absl::Status AtomicallyWriteBinaryProto(absl::string_view filename, const tsl::protobuf::Message& proto, tsl::Env* env) { auto nonatomically_write = [&](const std::string& uncommitted_filename) { TF_RETURN_IF_ERROR(WriteBinaryProto(env, uncommitted_filename, proto)); return absl::OkStatus(); }; TF_RETURN_WITH_CONTEXT_IF_ERROR( AtomicallyWrite(filename, env, nonatomically_write), "Requested to write proto in binary format: ", proto.DebugString()); return absl::OkStatus(); } absl::Status AtomicallyWriteTextProto(absl::string_view filename, const tsl::protobuf::Message& proto, tsl::Env* env) { auto nonatomically_write = [&](const std::string& uncommitted_filename) { TF_RETURN_IF_ERROR(WriteTextProto(env, uncommitted_filename, proto)); return absl::OkStatus(); }; TF_RETURN_WITH_CONTEXT_IF_ERROR( AtomicallyWrite(filename, env, nonatomically_write), "Requested to write proto in text format: ", proto.DebugString()); return absl::OkStatus(); } absl::Status AtomicallyWriteTFRecords(absl::string_view filename, const std::vector<Tensor>& tensors, absl::string_view compression, tsl::Env* env) { auto nonatomically_write = [&](const std::string& uncommitted_filename) { snapshot_util::TFRecordWriter writer(uncommitted_filename, std::string(compression)); TF_RETURN_IF_ERROR(writer.Initialize(env)); TF_RETURN_IF_ERROR(writer.WriteTensors(tensors)); return writer.Close(); }; TF_RETURN_WITH_CONTEXT_IF_ERROR( AtomicallyWrite(filename, env, nonatomically_write), " Requested to 
atomically write TF record file: ", filename); return absl::OkStatus(); } absl::StatusOr<std::vector<std::string>> GetChildren( absl::string_view directory, tsl::Env* env) { std::vector<std::string> files, result; TF_RETURN_IF_ERROR(env->FileExists(std::string(directory))); absl::Status status = env->GetChildren(std::string(directory), &files); if (absl::IsNotFound(status)) { return result; } for (std::string& file : files) { if (!IsTemporaryFile(file)) { result.push_back(std::move(file)); } } return result; } bool IsTemporaryFile(absl::string_view filename) { return absl::EndsWith(filename, kTempFileSuffix); } int64_t SnapshotChunksCardinality(absl::string_view snapshot_path, tsl::Env* env) { if (!env->FileExists(SnapshotDoneFilePath(snapshot_path)).ok()) { return kUnknownCardinality; } absl::StatusOr<std::vector<std::string>> chunks = GetChildren(CommittedChunksDirectory(snapshot_path), env); if (!chunks.ok()) { return kUnknownCardinality; } return chunks->size(); } } }
#include "tensorflow/core/data/service/snapshot/file_utils.h" #include <cstdint> #include <string> #include <vector> #include "xla/tsl/lib/core/status_test_util.h" #include "xla/tsl/lib/io/compression.h" #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/data/service/test_util.h" #include "tensorflow/core/data/snapshot_utils.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { using ::testing::ElementsAre; using ::testing::IsEmpty; using tsl::testing::IsOkAndHolds; using tsl::testing::StatusIs; absl::StatusOr<std::string> CreateTestDirectory() { std::string directory; if (!tsl::Env::Default()->LocalTempFilename(&directory)) { return tsl::errors::FailedPrecondition( "Failed to create local test directory."); } TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(directory)); return directory; } using AtomicallyWriteStringToFileTest = ::testing::TestWithParam<std::string>; TEST_P(AtomicallyWriteStringToFileTest, WriteString) { TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory()); std::string test_file = tsl::io::JoinPath(directory, "test_file"); std::string file_contents = GetParam(); TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, file_contents, tsl::Env::Default())); std::string data; TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file)); TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), test_file, &data)); EXPECT_EQ(data, file_contents); } INSTANTIATE_TEST_SUITE_P(FileContents, AtomicallyWriteStringToFileTest, ::testing::ValuesIn<std::string>({"OK", ""})); TEST(FileUtilsTest, AtomicallyWriteBinaryProto) { 
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory()); std::string test_file = tsl::io::JoinPath(directory, "test_file"); DatasetDef out = testing::RangeDataset(10); TF_ASSERT_OK(AtomicallyWriteBinaryProto(test_file, out, tsl::Env::Default())); DatasetDef in; TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file)); TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), test_file, &in)); EXPECT_THAT(in, testing::EqualsProto(out)); } TEST(FileUtilsTest, AtomicallyWriteTextProto) { TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory()); std::string test_file = tsl::io::JoinPath(directory, "test_file"); DatasetDef out = testing::RangeDataset(10); TF_ASSERT_OK(AtomicallyWriteTextProto(test_file, out, tsl::Env::Default())); DatasetDef in; TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file)); TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), test_file, &in)); EXPECT_THAT(in, testing::EqualsProto(out)); } TEST(FileUtilsTest, AtomicallyWriteTFRecord) { TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory()); std::string test_file = tsl::io::JoinPath(directory, "test_file"); Tensor out = CreateTensor<int64_t>(TensorShape({2}), {1, 2}); TF_ASSERT_OK(AtomicallyWriteTFRecords( test_file, {out}, tsl::io::compression::kSnappy, tsl::Env::Default())); TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file)); snapshot_util::TFRecordReaderImpl reader(test_file, tsl::io::compression::kSnappy); TF_ASSERT_OK(reader.Initialize(tsl::Env::Default())); TF_ASSERT_OK_AND_ASSIGN(std::vector<Tensor> in, reader.GetTensors()); EXPECT_EQ(out.DebugString(), in.front().DebugString()); } TEST(FileUtilsTest, GetChildren) { TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory()); std::string test_file = tsl::io::JoinPath(directory, "test_file"); TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, "", tsl::Env::Default())); std::string tmp_file = tsl::io::JoinPath(directory, "test_file.tmp"); 
TF_ASSERT_OK(AtomicallyWriteStringToFile(tmp_file, "", tsl::Env::Default())); EXPECT_THAT(GetChildren(directory, tsl::Env::Default()), IsOkAndHolds(ElementsAre("test_file"))); } TEST(FileUtilsTest, GetChildrenEmptyDirectory) { TF_ASSERT_OK_AND_ASSIGN(std::string empty_directory, CreateTestDirectory()); EXPECT_THAT(GetChildren(empty_directory, tsl::Env::Default()), IsOkAndHolds(IsEmpty())); } TEST(FileUtilsTest, GetChildrenDirectoryNotFound) { EXPECT_THAT(GetChildren("Not exist", tsl::Env::Default()), StatusIs(tsl::error::NOT_FOUND)); } TEST(FileUtilsTest, IsTemporaryFile) { EXPECT_TRUE(IsTemporaryFile("file.tmp")); EXPECT_FALSE(IsTemporaryFile("file")); EXPECT_FALSE(IsTemporaryFile("")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/file_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/file_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
837280c5-10b4-4acb-af2b-bf67bd73c365
cpp
tensorflow/tensorflow
round_weights
tensorflow/tools/graph_transforms/round_weights.cc
tensorflow/tools/graph_transforms/round_weights_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/threadpool_device.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RoundWeights(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { int32_t num_steps; TF_RETURN_IF_ERROR( context.GetOneInt32Parameter("num_steps", 256, &num_steps)); TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Const"}, [num_steps](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& old_const_node = match.node; if (!old_const_node.attr().count("dtype")) { return errors::InvalidArgument("No 'dtype' attribute for Const node ", old_const_node.name()); } if (!old_const_node.attr().count("value")) { return errors::InvalidArgument("No 'value' attribute for Const node ", old_const_node.name()); } const DataType old_dtype = old_const_node.attr().at("dtype").type(); Tensor old_tensor; if (!old_tensor.FromProto(old_const_node.attr().at("value").tensor())) { return errors::InvalidArgument("Decoding Tensor failed for node", old_const_node.name()); } const size_t num_elements = old_tensor.NumElements(); if ((old_dtype != DT_FLOAT) || (num_elements < 16)) { new_nodes->push_back(old_const_node); return OkStatus(); } const float* old_values = old_tensor.flat<float>().data(); float min = std::numeric_limits<float>::max(); float max = std::numeric_limits<float>::min(); for (int i = 0; i < num_elements; ++i) { const float value = old_values[i]; min = std::min(min, 
value); max = std::max(max, value); } if (min == max) { if (std::abs(min) < 0.000001f) { max = min + 1.0f; } else if (min > 0) { max = 2.0f * min; } else { min = 2.0f * max; } } Tensor rounded_tensor(DT_FLOAT, old_tensor.shape()); float* rounded_values = rounded_tensor.flat<float>().data(); const float bucket_width = (max - min) / num_steps; for (int i = 0; i < num_elements; ++i) { const int32_t bucket = std::floor((old_values[i] - min) / bucket_width); rounded_values[i] = min + (bucket_width * (bucket + 0.5f)); } NodeDef rounded_const_node; rounded_const_node.set_op("Const"); rounded_const_node.set_name(old_const_node.name()); SetNodeAttr("dtype", DT_FLOAT, &rounded_const_node); SetNodeTensorAttr<float>("value", rounded_tensor, &rounded_const_node); new_nodes->push_back(rounded_const_node); return OkStatus(); }, {}, output_graph_def)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("round_weights", RoundWeights); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RoundWeights(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RoundWeightsTest : public ::testing::Test { protected: void TestRoundWeights() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>( &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f, -5.0f, -3.0f, -6.0f}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_data)); Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 10})); test::FillValues<float>( &weights_data, {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f}); Output weights_op = Const(root.WithOpName("weights_op"), Input::Initializer(weights_data)); Output conv_op = Conv2D(root.WithOpName("output"), input_op, weights_op, {1, 1, 1, 1}, "VALID"); GraphDef original_graph_def; TF_ASSERT_OK(root.ToGraphDef(&original_graph_def)); std::unique_ptr<Session> original_session(NewSession(SessionOptions())); TF_ASSERT_OK(original_session->Create(original_graph_def)); std::vector<Tensor> original_outputs; TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs)); GraphDef 
rounded_graph_def; TF_ASSERT_OK( RoundWeights(original_graph_def, {{}, {"output"}}, &rounded_graph_def)); std::unique_ptr<Session> rounded_session(NewSession(SessionOptions())); TF_ASSERT_OK(rounded_session->Create(rounded_graph_def)); std::vector<Tensor> rounded_outputs; TF_ASSERT_OK(rounded_session->Run({}, {"output"}, {}, &rounded_outputs)); test::ExpectTensorNear<float>(original_outputs[0], rounded_outputs[0], 0.5); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(rounded_graph_def, &node_lookup); EXPECT_EQ(1, node_lookup.count("input_op")); const NodeDef* r_input_op = node_lookup.at("input_op"); EXPECT_EQ(DT_FLOAT, r_input_op->attr().at("dtype").type()); EXPECT_EQ(1, node_lookup.count("weights_op")); const NodeDef* r_weights_op = node_lookup.at("weights_op"); EXPECT_EQ("Const", r_weights_op->op()); EXPECT_EQ(DT_FLOAT, r_weights_op->attr().at("dtype").type()); } }; TEST_F(RoundWeightsTest, TestRoundWeights) { TestRoundWeights(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/round_weights.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/round_weights_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ee76d99d-3019-456e-84c1-282e1460ac56
cpp
tensorflow/tensorflow
remove_attribute
tensorflow/tools/graph_transforms/remove_attribute.cc
tensorflow/tools/graph_transforms/remove_attribute_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveAttribute(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { if (!context.params.count("attribute_name") || (context.params.at("attribute_name").size() != 1)) { return errors::InvalidArgument( "remove_attribute expects exactly one 'attribute_name' " "argument, e.g. remove_attribute(op_name=Mul, attribute_name=foo)"); } string op_name; if (context.params.count("op_name")) { if (context.params.at("op_name").size() != 1) { return errors::InvalidArgument( "remove_attribute expects a single op_name argument, but found ", context.params.at("op_name").size()); } op_name = context.params.at("op_name")[0]; } else { op_name = "*"; } const string attribute_name = context.params.at("attribute_name")[0]; output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; if (((op_name == "*") || (op_name == node.op())) && (node.attr().count(attribute_name))) { new_node->mutable_attr()->erase(attribute_name); } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("remove_attribute", RemoveAttribute); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RemoveAttribute(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RemoveAttributeTest : public ::testing::Test { protected: void TestRemoveAttribute() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); SetNodeAttr<int32>("foo", 23, mul_node1); SetNodeAttr<string>("bar", "something", mul_node1); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); SetNodeAttr<int32>("foo", 46, add_node2); SetNodeAttr<int32>("bob", 23, add_node2); SetNodeAttr<string>("bar", "something else", add_node2); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); 
add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef wildcard_result; TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; context.params.insert( std::pair<string, std::vector<string>>({"op_name", {string("*")}})); context.params.insert(std::pair<string, std::vector<string>>( {"attribute_name", {string("foo")}})); TF_ASSERT_OK(RemoveAttribute(graph_def, context, &wildcard_result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(wildcard_result, &node_lookup); EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar")); EXPECT_EQ(0, node_lookup.at("add_node2")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob")); GraphDef targeted_result; TransformFuncContext targeted_context; targeted_context.input_names = {}; targeted_context.output_names = {"mul_node1"}; targeted_context.params.insert( std::pair<string, std::vector<string>>({"op_name", {string("Mul")}})); targeted_context.params.insert(std::pair<string, std::vector<string>>( {"attribute_name", {string("foo")}})); TF_ASSERT_OK( RemoveAttribute(graph_def, targeted_context, &targeted_result)); MapNamesToNodes(targeted_result, &node_lookup); EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob")); } }; TEST_F(RemoveAttributeTest, TestRemoveAttribute) { TestRemoveAttribute(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_attribute.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_attribute_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e715a858-471c-4f6e-a0ed-8541b362a1b1
cpp
tensorflow/tensorflow
transform_utils
tensorflow/tools/graph_transforms/transform_utils.cc
tensorflow/tools/graph_transforms/transform_utils_test.cc
#include "tensorflow/tools/graph_transforms/transform_utils.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/str_util.h" namespace tensorflow { namespace graph_transforms { namespace { inline bool IsMerge(const NodeDef& node_def) { return node_def.op() == "Merge" || node_def.op() == "RefMerge" || node_def.op() == "_XlaMerge"; } void RecordMatchedNodes(const NodeMatch& match, std::set<string>* matched_nodes) { matched_nodes->insert(match.node.name()); for (const NodeMatch& input_match : match.inputs) { RecordMatchedNodes(input_match, matched_nodes); } } inline uint64 Hash64String(const string& input) { return Hash64(input.data(), input.size()); } } void MatchedNodesAsArray(const NodeMatch& match, std::vector<NodeDef>* result) { std::set<string> found_nodes; std::vector<NodeMatch> current_matches = {match}; while (!current_matches.empty()) { std::vector<NodeMatch> next_matches; for (const NodeMatch& current_match : current_matches) { if (found_nodes.count(current_match.node.name())) { continue; } found_nodes.insert(current_match.node.name()); result->push_back(current_match.node); for (const NodeMatch& input_match : current_match.inputs) { next_matches.push_back(input_match); } } current_matches = next_matches; } } void MapNamesToNodes(const GraphDef& graph_def, std::map<string, const NodeDef*>* result) { for (const NodeDef& node : graph_def.node()) { (*result)[node.name()] = &node; } } void MapNodesToOutputs(const GraphDef& graph_def, std::map<string, std::vector<const NodeDef*>>* result) { std::map<string, const NodeDef*> node_map; MapNamesToNodes(graph_def, &node_map); for (const NodeDef& node : graph_def.node()) { for (const string& input : node.input()) { string input_node_name = NodeNameFromInput(input); (*result)[input_node_name].push_back(&node); } } } void 
NodeNamePartsFromInput(const string& input_name, string* prefix, string* node_name, string* suffix) { std::vector<string> input_parts = str_util::Split(input_name, ':'); if (input_parts.size() < 2) { *suffix = ""; } else { *suffix = ":" + input_parts[1]; } StringPiece node_name_piece(input_parts[0]); if (absl::ConsumePrefix(&node_name_piece, "^")) { *prefix = "^"; } else { *prefix = ""; } *node_name = string(node_name_piece); } string NodeNameFromInput(const string& input_name) { string prefix; string node_name; string suffix; NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix); return node_name; } string CanonicalInputName(const string& input_name) { string prefix; string node_name; string suffix; NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix); if (suffix.empty()) { suffix = ":0"; } return prefix + node_name + suffix; } uint64 HashNodeDef(const NodeDef& node) { uint64 hash = Hash64String(node.op()); hash = Hash64Combine(hash, Hash64String(node.name())); for (const string& input : node.input()) { hash = Hash64Combine(hash, Hash64String(CanonicalInputName(input))); } hash = Hash64Combine(hash, Hash64String(node.device())); std::vector<string> attr_names; attr_names.reserve(node.attr().size()); for (const auto& attr : node.attr()) { attr_names.push_back(attr.first); } std::sort(attr_names.begin(), attr_names.end()); string attr_serialized; for (const string& attr_name : attr_names) { auto attr = node.attr().at(attr_name); attr.SerializeToString(&attr_serialized); hash = Hash64Combine(hash, Hash64String(attr_serialized)); } return hash; } void AddNodeInput(const string& input_name, NodeDef* node) { *(node->mutable_input()->Add()) = input_name; } void CopyNodeAttr(const NodeDef& source, const string& source_key, const string& dest_key, NodeDef* dest) { CHECK_NE(0, source.attr().count(source_key)) << "No key '" << source_key << "' found in " << source.DebugString(); (*(dest->mutable_attr()))[dest_key] = source.attr().at(source_key); } 
Tensor GetNodeTensorAttr(const NodeDef& node, const string& key) { TensorProto tensor_proto = node.attr().at(key).tensor(); Tensor tensor; CHECK(tensor.FromProto(tensor_proto)); return tensor; } void FilterGraphDef(const GraphDef& input_graph_def, std::function<bool(const NodeDef&)> selector, GraphDef* output_graph_def) { output_graph_def->mutable_node()->Clear(); for (const NodeDef& node : input_graph_def.node()) { if (selector(node)) { *output_graph_def->mutable_node()->Add() = node; } } } void RemoveAttributes(const GraphDef& input_graph_def, const std::vector<string>& attributes, GraphDef* output_graph_def) { output_graph_def->mutable_node()->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; for (const string& attribute : attributes) { new_node->mutable_attr()->erase(attribute); } } } Status SortByExecutionOrder(const GraphDef& input_graph_def, GraphDef* output_graph_def) { const int num_nodes = input_graph_def.node_size(); std::vector<int> ready; std::vector<int> pending_count; pending_count.reserve(num_nodes); std::vector<gtl::InlinedVector<int, 4>> outputs(num_nodes); std::map<string, int> name_index; for (int i = 0; i < input_graph_def.node_size(); ++i) { const NodeDef& node(input_graph_def.node(i)); name_index[node.name()] = i; } for (int n = 0; n < num_nodes; ++n) { const NodeDef& node_def(input_graph_def.node(n)); if (IsMerge(node_def)) { int32_t num_control_edges = 0; for (int i = 0; i < node_def.input_size(); ++i) { if (absl::StartsWith(node_def.input(i), "^")) { num_control_edges++; } } pending_count.push_back(num_control_edges + 1); } else { pending_count.push_back(node_def.input_size()); } if (node_def.input_size() == 0) { ready.push_back(n); continue; } for (int i = 0; i < node_def.input_size(); ++i) { const string& input_name = node_def.input(i); const string& input_node_name = NodeNameFromInput(input_name); if (!name_index.count(input_node_name)) { return 
errors::InvalidArgument("Node '", node_def.name(), "': Unknown input node '", node_def.input(i), "'"); } outputs[name_index[input_node_name]].push_back(n); } } int processed = 0; output_graph_def->Clear(); while (!ready.empty()) { int o = ready.back(); ready.pop_back(); ++processed; const NodeDef& node_def(input_graph_def.node(o)); *output_graph_def->mutable_node()->Add() = node_def; for (size_t i = 0; i < outputs[o].size(); ++i) { const int output = outputs[o][i]; pending_count[output]--; if (pending_count[output] == 0) { ready.push_back(output); } } } if (processed < num_nodes) { LOG(WARNING) << "IN " << __func__ << (num_nodes - processed) << " NODES IN A CYCLE"; for (int64_t i = 0; i < num_nodes; i++) { if (pending_count[i] != 0) { LOG(WARNING) << "PENDING: " << SummarizeNodeDef(input_graph_def.node(i)) << "WITH PENDING COUNT = " << pending_count[i]; } } return errors::InvalidArgument(num_nodes - processed, " nodes in a cycle"); } return OkStatus(); } string OpTypePattern::DebugString() const { string result = "{" + op + ", {"; for (const OpTypePattern& input : inputs) { result += input.DebugString() + ","; } result += "}}"; return result; } string NodeMatch::DebugString() const { string result = "{"; result += node.DebugString(); result += ", {"; for (const NodeMatch& input : inputs) { result += input.DebugString() + ","; } result += "}}"; return result; } GraphMatcher::GraphMatcher(const GraphDef& graph_def) { SortByExecutionOrder(graph_def, &graph_def_).IgnoreError(); MapNamesToNodes(graph_def_, &node_map_); } Status GraphMatcher::GetOpTypeMatches(const OpTypePattern& pattern, std::vector<NodeMatch>* matches) { std::set<string> matched_nodes; for (const NodeDef& node : graph_def_.node()) { if (matched_nodes.count(node.name())) { continue; } NodeMatch match; if (DoesOpTypeMatch(node, pattern, matched_nodes, &match)) { RecordMatchedNodes(match, &matched_nodes); matches->push_back(match); } } return OkStatus(); } bool GraphMatcher::DoesOpTypeMatch( const 
NodeDef& node, const OpTypePattern& pattern, const std::set<string>& previously_matched_nodes, NodeMatch* match) { VLOG(1) << "Looking at node " << node.DebugString(); VLOG(1) << "pattern=" << pattern.DebugString(); VLOG(1) << "match=" << match->DebugString(); if (previously_matched_nodes.count(node.name())) { VLOG(1) << "node " << node.name() << " has been previously matched"; return false; } bool pattern_matched = false; if (pattern.op == "*") { pattern_matched = true; } else { std::vector<string> pattern_ops = str_util::Split(pattern.op, '|'); for (const string& pattern_op : pattern_ops) { if (node.op() == pattern_op) { pattern_matched = true; } } } if (!pattern_matched) { VLOG(1) << "node.op() != pattern.op()"; return false; } match->node = node; std::vector<string> non_control_inputs; for (const string& input : node.input()) { if (!input.empty() && (input[0] != '^')) { non_control_inputs.push_back(input); } } if (pattern.inputs.empty()) { return true; } if (non_control_inputs.size() != pattern.inputs.size()) { VLOG(1) << "non_control_inputs.size() != pattern.inputs.size()"; return false; } for (int i = 0; i < pattern.inputs.size(); ++i) { const string& input_node_name = NodeNameFromInput(non_control_inputs[i]); const NodeDef& input_node = *(node_map_[input_node_name]); const OpTypePattern& input_pattern = pattern.inputs[i]; match->inputs.push_back(NodeMatch()); NodeMatch* input_match = &(match->inputs.back()); if (!DoesOpTypeMatch(input_node, input_pattern, previously_matched_nodes, input_match)) { return false; } } return true; } Status ReplaceMatchingOpTypes( const GraphDef& input_graph_def, const OpTypePattern& pattern, const std::function<Status(const NodeMatch&, const std::set<string>&, const std::set<string>&, std::vector<NodeDef>*)>& node_generator, const ReplaceMatchingOpTypesOptions& options, GraphDef* output_graph_def) { GraphMatcher matcher(input_graph_def); std::vector<NodeMatch> matches; TF_RETURN_IF_ERROR(matcher.GetOpTypeMatches(pattern, 
&matches)); std::set<string> matched_nodes; std::map<string, const NodeMatch*> matches_by_head_name; for (const NodeMatch& match : matches) { matches_by_head_name[match.node.name()] = &match; RecordMatchedNodes(match, &matched_nodes); } std::map<string, std::vector<const NodeDef*>> outputs_map; MapNodesToOutputs(input_graph_def, &outputs_map); output_graph_def->Clear(); for (const NodeDef& input_node : input_graph_def.node()) { if (matches_by_head_name.count(input_node.name())) { const NodeMatch* match = matches_by_head_name[input_node.name()]; std::vector<NodeDef> matched_nodes_array; MatchedNodesAsArray(*match, &matched_nodes_array); std::set<string> matched_nodes_lookup; for (const NodeDef& matched_node : matched_nodes_array) { matched_nodes_lookup.insert(matched_node.name()); } std::set<string> input_nodes; std::set<string> output_nodes; for (const NodeDef& matched_node : matched_nodes_array) { for (const string& input_name : matched_node.input()) { string input_node_name = NodeNameFromInput(input_name); if (!matched_nodes_lookup.count(input_node_name)) { input_nodes.insert(matched_node.name()); } } if (outputs_map.count(matched_node.name())) { for (const NodeDef* dependent_node : outputs_map[matched_node.name()]) { if (!matched_nodes_lookup.count(dependent_node->name())) { output_nodes.insert(matched_node.name()); } } } } std::vector<NodeDef> new_nodes; TF_RETURN_IF_ERROR( node_generator(*match, input_nodes, output_nodes, &new_nodes)); std::set<string> new_node_names; for (const NodeDef& new_node : new_nodes) { new_node_names.insert(new_node.name()); } bool abort_replacement = false; if (!options.allow_inconsistencies) { for (const string& expected_output : output_nodes) { if (!new_node_names.count(expected_output)) { LOG(WARNING) << "Expected " << expected_output << " to be preserved."; abort_replacement = true; } } } if (abort_replacement) { LOG(WARNING) << "Generator function didn't preserve needed nodes, " << "copying old replacements back in instead."; 
std::vector<NodeDef> old_nodes; MatchedNodesAsArray(*match, &old_nodes); for (const NodeDef& old_node : old_nodes) { NodeDef* added_node = output_graph_def->mutable_node()->Add(); *added_node = old_node; } } else { for (const NodeDef& new_node : new_nodes) { NodeDef* added_node = output_graph_def->mutable_node()->Add(); *added_node = new_node; } } } else if (!matched_nodes.count(input_node.name())) { NodeDef* added_node = output_graph_def->mutable_node()->Add(); *added_node = input_node; } else { } } return OkStatus(); } Status RenameNodeInputs(const GraphDef& input_graph_def, const std::map<string, string>& inputs_to_rename, const std::unordered_set<string>& nodes_to_ignore, GraphDef* output_graph_def) { std::map<string, std::vector<std::pair<string, string>>> canonical_inputs_to_rename; for (const auto& input_to_rename : inputs_to_rename) { canonical_inputs_to_rename[NodeNameFromInput(input_to_rename.first)] .push_back({input_to_rename.first, input_to_rename.second}); } output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; new_node->mutable_input()->Clear(); for (const string& input_name : node.input()) { std::set<string> already_visited; string new_input_name = input_name; while ( canonical_inputs_to_rename.count(NodeNameFromInput(new_input_name))) { string input_node_name = NodeNameFromInput(new_input_name); if (already_visited.count(input_node_name)) { return errors::InvalidArgument( "RenameNodeInputs argument contains a cycle for ", input_node_name); } already_visited.insert(input_node_name); if (nodes_to_ignore.count(node.name())) { break; } bool any_match_found = false; for (const std::pair<string, string>& input_to_rename : canonical_inputs_to_rename.at(input_node_name)) { const string& source_name = input_to_rename.first; const string& dest_name = input_to_rename.second; bool is_match; string match_name; if (str_util::EndsWith(source_name, ":*")) { 
is_match = true; string prefix; string unused_node_name; string suffix; NodeNamePartsFromInput(new_input_name, &prefix, &unused_node_name, &suffix); match_name = prefix + dest_name + suffix; } else { is_match = (CanonicalInputName(source_name) == CanonicalInputName(new_input_name)); match_name = dest_name; } if (is_match) { new_input_name = match_name; any_match_found = true; } } if (!any_match_found) { break; } } *(new_node->mutable_input()->Add()) = new_input_name; } } return OkStatus(); } void CopyOriginalMatch(const NodeMatch& match, std::vector<NodeDef>* new_nodes) { std::vector<NodeDef> old_nodes; MatchedNodesAsArray(match, &old_nodes); for (const NodeDef& old_node : old_nodes) { new_nodes->push_back(old_node); } } TransformRegistry* GetTransformRegistry() { static TransformRegistry transform_registry; return &transform_registry; } void FindInvalidInputs(const GraphDef& graph_def, std::vector<std::pair<string, string>>* invalid_inputs) { std::map<string, const NodeDef*> node_map; MapNamesToNodes(graph_def, &node_map); for (const NodeDef& node : graph_def.node()) { for (const string& input : node.input()) { string input_node = NodeNameFromInput(input); if (!node_map.count(input_node)) { invalid_inputs->push_back({node.name(), input_node}); } } } } Status IsGraphValid(const GraphDef& graph_def) { std::vector<std::pair<string, string>> invalid_inputs; FindInvalidInputs(graph_def, &invalid_inputs); if (!invalid_inputs.empty()) { std::map<string, const NodeDef*> node_map; MapNamesToNodes(graph_def, &node_map); for (const std::pair<string, string>& invalid_input : invalid_inputs) { LOG(ERROR) << "Invalid input " << invalid_input.second << " for node " << invalid_input.first << " - " << node_map[invalid_input.first]->DebugString(); } return errors::Internal( "Invalid graph with inputs referring to nonexistent nodes"); } return OkStatus(); } Status GetInOutTypes(const NodeDef& node_def, DataTypeVector* inputs, DataTypeVector* outputs) { const OpDef* op_def; 
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def)); TF_RETURN_IF_ERROR(InOutTypesForNode(node_def, *op_def, inputs, outputs)); return OkStatus(); } Status TensorShapeFromString(const string& shape_string, TensorShape* result) { if (shape_string.empty()) { return errors::InvalidArgument("Specified shape is empty."); } std::vector<string> dims_as_str = str_util::Split(shape_string, ","); std::vector<int64_t> dims; for (const string& dim : dims_as_str) { int64_t tmp; if (strings::safe_strto64(dim, &tmp)) { dims.push_back(tmp); } else { return errors::InvalidArgument("Could parse as shape: '", shape_string, "'"); } } *result = TensorShape(dims); return OkStatus(); } int TransformFuncContext::CountParameters(const string& name) const { if (params.count(name)) { return params.at(name).size(); } else { return 0; } } Status TransformFuncContext::GetOneStringParameter(const string& name, const string& default_value, string* result) const { const int params_count = CountParameters(name); if (params_count == 0) { *result = default_value; return OkStatus(); } else if (params_count == 1) { *result = params.at(name).at(0); return OkStatus(); } else { return errors::InvalidArgument("Expected a single '", name, "' parameter, but found ", params_count, " occurrences"); } } Status TransformFuncContext::GetOneInt32Parameter(const string& name, int32_t default_value, int32* result) const { const int params_count = CountParameters(name); if (params_count == 0) { *result = default_value; return OkStatus(); } string string_value; TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value)); if (!strings::safe_strto32(StringPiece(string_value), result)) { return errors::InvalidArgument("Couldn't interpret the ", name, " argument as a number:", string_value); } return OkStatus(); } Status TransformFuncContext::GetOneInt64Parameter(const string& name, int64_t default_value, int64_t* result) const { const int params_count = CountParameters(name); if 
(params_count == 0) { *result = default_value; return OkStatus(); } string string_value; TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value)); if (!strings::safe_strto64(StringPiece(string_value), result)) { return errors::InvalidArgument("Couldn't interpret the ", name, " argument as a number:", string_value); } return OkStatus(); } Status TransformFuncContext::GetOneFloatParameter(const string& name, float default_value, float* result) const { const int params_count = CountParameters(name); if (params_count == 0) { *result = default_value; return OkStatus(); } string string_value; TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value)); if (!strings::safe_strtof(string_value.c_str(), result)) { return errors::InvalidArgument( "Couldn't interpret the ", name, " argument as a float number:", string_value); } return OkStatus(); } Status TransformFuncContext::GetOneBoolParameter(const string& name, bool default_value, bool* result) const { const int params_count = CountParameters(name); if (params_count == 0) { *result = default_value; return OkStatus(); } string string_value; TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value)); if (string_value == "true" || string_value == "1") { *result = true; } else if (string_value == "false" || string_value == "0") { *result = false; } else { return errors::InvalidArgument("Couldn't interpret the ", name, " argument as a boolean:", string_value, " (expected true, false, 0 or 1)"); } return OkStatus(); } } }
// Unit tests for the graph-transform helper functions in transform_utils.h.
#include "tensorflow/tools/graph_transforms/transform_utils.h"

#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"

namespace tensorflow {
namespace graph_transforms {

class TransformUtilsTest : public ::testing::Test {
 protected:
  // Builds (a + b) * placeholder and checks MapNamesToNodes indexes every
  // node by its name.
  void TestMapNamesToNodes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 100;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(graph_def, &node_map);

    EXPECT_EQ(1, node_map.count("a"));
    EXPECT_EQ(1, node_map.count("b"));
    EXPECT_EQ(1, node_map.count("add"));
    EXPECT_EQ(1, node_map.count("placeholder"));
    EXPECT_EQ(1, node_map.count("output"));
    EXPECT_EQ(0, node_map.count("no_such_node"));
  }

  // Checks MapNodesToOutputs maps each node to the nodes that consume it
  // (nodes with no consumers are absent from the map).
  void TestMapNodesToOutputs() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 100;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    std::map<string, std::vector<const NodeDef*>> outputs_map;
    MapNodesToOutputs(graph_def, &outputs_map);

    EXPECT_EQ(1, outputs_map.count("a"));
    EXPECT_EQ(1, outputs_map["a"].size());
    EXPECT_EQ("add", outputs_map["a"][0]->name());

    EXPECT_EQ(1, outputs_map.count("b"));
    EXPECT_EQ(1, outputs_map["b"].size());
    EXPECT_EQ("add", outputs_map["b"][0]->name());

    EXPECT_EQ(1, outputs_map.count("add"));
    EXPECT_EQ(1, outputs_map["add"].size());
    EXPECT_EQ("output", outputs_map["add"][0]->name());

    EXPECT_EQ(1, outputs_map.count("placeholder"));
    EXPECT_EQ(1, outputs_map["placeholder"].size());
    EXPECT_EQ("output", outputs_map["placeholder"][0]->name());

    EXPECT_EQ(0, outputs_map.count("output"));
    EXPECT_EQ(0, outputs_map.count("no_such_node"));
  }

  // Checks splitting of input strings into "^" prefix, node name, and ":N"
  // port suffix.
  void TestNodeNamePartsFromInput() {
    string prefix;
    string node_name;
    string suffix;

    NodeNamePartsFromInput("some_node_name", &prefix, &node_name, &suffix);
    EXPECT_EQ("", prefix);
    EXPECT_EQ("some_node_name", node_name);
    EXPECT_EQ("", suffix);

    NodeNamePartsFromInput("some_node_name/with/slashes", &prefix, &node_name,
                           &suffix);
    EXPECT_EQ("", prefix);
    EXPECT_EQ("some_node_name/with/slashes", node_name);
    EXPECT_EQ("", suffix);

    NodeNamePartsFromInput("some_node_name:0", &prefix, &node_name, &suffix);
    EXPECT_EQ("", prefix);
    EXPECT_EQ("some_node_name", node_name);
    EXPECT_EQ(":0", suffix);

    NodeNamePartsFromInput("^some_node_name", &prefix, &node_name, &suffix);
    EXPECT_EQ("^", prefix);
    EXPECT_EQ("some_node_name", node_name);
    EXPECT_EQ("", suffix);

    NodeNamePartsFromInput("^some_node_name:99", &prefix, &node_name, &suffix);
    EXPECT_EQ("^", prefix);
    EXPECT_EQ("some_node_name", node_name);
    EXPECT_EQ(":99", suffix);
  }

  // Checks stripping of the control prefix and port suffix.
  void
  TestNodeNameFromInput() {
    EXPECT_EQ("node_name", NodeNameFromInput("node_name"));
    EXPECT_EQ("node_name", NodeNameFromInput("node_name:0"));
    EXPECT_EQ("node_name", NodeNameFromInput("^node_name"));
    EXPECT_EQ("node_name", NodeNameFromInput("^node_name:42"));
  }

  // Checks canonicalization always carries an explicit port suffix.
  void TestCanonicalInputName() {
    EXPECT_EQ("node_name:0", CanonicalInputName("node_name"));
    EXPECT_EQ("node_name:0", CanonicalInputName("node_name:0"));
    EXPECT_EQ("^node_name:0", CanonicalInputName("^node_name"));
    EXPECT_EQ("^node_name:42", CanonicalInputName("^node_name:42"));
  }

  // Checks AddNodeInput appends an input string to a NodeDef.
  void TestAddNodeInput() {
    NodeDef node;
    AddNodeInput("foo", &node);
    EXPECT_EQ("foo", node.input(0));
  }

  // Checks CopyNodeAttr copies an attribute under a new key.
  void TestCopyNodeAttr() {
    NodeDef node;
    auto mutable_attr = node.mutable_attr();
    (*mutable_attr)["foo"].set_i(3);

    NodeDef copied_node;
    CopyNodeAttr(node, "foo", "bar", &copied_node);
    EXPECT_EQ(3, copied_node.attr().at("bar").i());
  }

  // Checks SetNodeAttr for int and string values.
  void TestSetNodeAttr() {
    NodeDef node;
    int32_t value_i = 32;
    SetNodeAttr("foo", value_i, &node);
    EXPECT_EQ(32, node.attr().at("foo").i());
    string value_s = "some_value";
    SetNodeAttr("bar", value_s, &node);
    EXPECT_EQ("some_value", node.attr().at("bar").s());
  }

  // Checks SetNodeTensorAttr builds a tensor attribute from shape + values.
  void TestSetNodeTensorAttr() {
    NodeDef node;
    SetNodeTensorAttr<int32>("foo", {3, 1}, {1, 2, 3}, &node);
    TensorProto tensor_proto = node.attr().at("foo").tensor();
    Tensor tensor;
    CHECK(tensor.FromProto(tensor_proto));
    EXPECT_EQ(DT_INT32, tensor.dtype());
    EXPECT_EQ(3, tensor.shape().dim_size(0));
    EXPECT_EQ(1, tensor.shape().dim_size(1));
    EXPECT_EQ(1, tensor.flat<int32>()(0));
    EXPECT_EQ(2, tensor.flat<int32>()(1));
    EXPECT_EQ(3, tensor.flat<int32>()(2));
  }

  // Checks SetNodeTensorAttr round-trips a whole Tensor.
  void TestSetNodeTensorAttrWithTensor() {
    NodeDef node;
    Tensor input_tensor(DT_INT32, {4, 5});
    test::FillIota<int32>(&input_tensor, 1);
    SetNodeTensorAttr<int32>("foo", input_tensor, &node);
    TensorProto tensor_proto = node.attr().at("foo").tensor();
    Tensor tensor;
    CHECK(tensor.FromProto(tensor_proto));
    test::ExpectTensorEqual<int32>(input_tensor, tensor);
  }

  // Checks GetNodeTensorAttr reads back a tensor attribute.
  void TestGetNodeTensorAttr() {
    NodeDef node;
    Tensor input_tensor(DT_INT32, {4, 5});
    test::FillIota<int32>(&input_tensor, 1);
    TensorProto tensor_proto;
    input_tensor.AsProtoTensorContent(&tensor_proto);
    SetNodeAttr("foo", tensor_proto, &node);
    Tensor result = GetNodeTensorAttr(node, "foo");
    test::ExpectTensorEqual<int32>(input_tensor, result);
  }

  // Checks FilterGraphDef drops nodes rejected by the predicate.
  void TestFilterGraphDef() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 100;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    Output remove_me = Add(root.WithOpName("remove_me"), mul, add);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef result_graph_def;
    FilterGraphDef(
        graph_def,
        [](const NodeDef& node) { return (node.name() != "remove_me"); },
        &result_graph_def);

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(result_graph_def, &node_map);
    EXPECT_EQ(1, node_map.count("a"));
    EXPECT_EQ(1, node_map.count("b"));
    EXPECT_EQ(1, node_map.count("add"));
    EXPECT_EQ(1, node_map.count("placeholder"));
    EXPECT_EQ(1, node_map.count("output"));
    EXPECT_EQ(0, node_map.count("remove_me"));
  }

  // Checks RemoveAttributes strips the named attribute from every node.
  void TestRemoveAttributes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef result_graph_def;
    RemoveAttributes(graph_def, {"dtype"}, &result_graph_def);

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(result_graph_def, &node_map);
    const NodeDef*
removed_placeholder = node_map["placeholder"];
    EXPECT_EQ(nullptr,
              tensorflow::AttrSlice(*removed_placeholder).Find("dtype"));
  }

  // Exercises GraphMatcher::GetOpTypeMatches with single-op, nested,
  // wildcard ("*"), and alternation ("Add|Mul") patterns.
  void TestGetOpTypeMatches() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 100;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphMatcher matcher(graph_def);

    std::vector<NodeMatch> const_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"Const"}, &const_matches));
    EXPECT_EQ(2, const_matches.size());
    for (const NodeMatch& match : const_matches) {
      EXPECT_EQ("Const", match.node.op());
      EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
          << "match.node.name()=" << match.node.name();
    }

    std::vector<NodeMatch> add_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add"}, &add_matches));
    EXPECT_EQ(1, add_matches.size());
    EXPECT_EQ("Add", add_matches[0].node.op());
    EXPECT_EQ("add", add_matches[0].node.name());

    std::vector<NodeMatch> add_child_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
                                          &add_child_matches));
    EXPECT_EQ(1, add_child_matches.size());
    EXPECT_EQ("Add", add_child_matches[0].node.op());
    EXPECT_EQ("add", add_child_matches[0].node.name());
    EXPECT_EQ(2, add_child_matches[0].inputs.size());
    for (const NodeMatch& match : add_child_matches[0].inputs) {
      EXPECT_EQ("Const", match.node.op());
      EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
          << "match.node.name()=" << match.node.name();
    }

    std::vector<NodeMatch> no_such_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"NoSuch"}, &no_such_matches));
    EXPECT_EQ(0, no_such_matches.size());

    std::vector<NodeMatch> all_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches(
        {"Mul", {{"Add", {{"Const"}, {"Const"}}}, {"Placeholder"}}},
        &all_matches));
    EXPECT_EQ(1, all_matches.size());
    EXPECT_EQ("Mul", all_matches[0].node.op());
    EXPECT_EQ("output", all_matches[0].node.name());
    EXPECT_EQ(2, all_matches[0].inputs.size());
    EXPECT_EQ("Add", all_matches[0].inputs[0].node.op());
    EXPECT_EQ("add", all_matches[0].inputs[0].node.name());
    EXPECT_EQ(2, all_matches[0].inputs[0].inputs.size());
    EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[0].node.op());
    EXPECT_EQ("a", all_matches[0].inputs[0].inputs[0].node.name());
    EXPECT_EQ(0, all_matches[0].inputs[0].inputs[0].inputs.size());
    EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[1].node.op());
    EXPECT_EQ("b", all_matches[0].inputs[0].inputs[1].node.name());
    EXPECT_EQ(0, all_matches[0].inputs[0].inputs[1].inputs.size());
    EXPECT_EQ("Placeholder", all_matches[0].inputs[1].node.op());
    EXPECT_EQ("placeholder", all_matches[0].inputs[1].node.name());
    EXPECT_EQ(0, all_matches[0].inputs[1].inputs.size());

    std::vector<NodeMatch> wildcard_matches;
    TF_ASSERT_OK(
        matcher.GetOpTypeMatches({"*", {{"*"}, {"*"}}}, &wildcard_matches));
    EXPECT_EQ(1, wildcard_matches.size());
    EXPECT_EQ("Add", wildcard_matches[0].node.op());
    EXPECT_EQ("Const", wildcard_matches[0].inputs[0].node.op());
    EXPECT_EQ("a", wildcard_matches[0].inputs[0].node.name());
    EXPECT_EQ("Const", wildcard_matches[0].inputs[1].node.op());
    EXPECT_EQ("b", wildcard_matches[0].inputs[1].node.name());

    std::vector<NodeMatch> or_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add|Mul"}, &or_matches));
    EXPECT_EQ(2, or_matches.size());
    EXPECT_EQ("Add", or_matches[0].node.op());
    EXPECT_EQ("add", or_matches[0].node.name());
    EXPECT_EQ("Mul", or_matches[1].node.op());
    EXPECT_EQ("output", or_matches[1].node.name());
  }

  // Checks matching when one node feeds both inputs of another (a DAG, not
  // a tree).
  void
  TestGetOpTypeMatchesDAG() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 100;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Output add = Add(root.WithOpName("add"), a_const, a_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphMatcher matcher(graph_def);

    std::vector<NodeMatch> add_matches;
    TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
                                          &add_matches));
    EXPECT_EQ(1, add_matches.size());
    EXPECT_EQ("Add", add_matches[0].node.op());
    EXPECT_EQ("add", add_matches[0].node.name());
    EXPECT_EQ("Const", add_matches[0].inputs[0].node.op());
    EXPECT_EQ("a", add_matches[0].inputs[0].node.name());
    EXPECT_EQ("Const", add_matches[0].inputs[1].node.op());
    EXPECT_EQ("a", add_matches[0].inputs[1].node.name());
  }

  // Checks ReplaceMatchingOpTypes by splitting every node into
  // <name>_before_identity followed by an Identity named <name>.
  void TestReplaceMatchingOpTypes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef replaced_graph_def;
    TF_ASSERT_OK(ReplaceMatchingOpTypes(
        graph_def, {"*"},
        [](const NodeMatch& match, const std::set<string>& input_nodes,
           const std::set<string>& output_nodes,
           std::vector<NodeDef>* new_nodes) {
          NodeDef original_copy;
          original_copy = match.node;
          const string original_name = match.node.name();
          original_copy.set_name(original_name + "_before_identity");
          new_nodes->push_back(original_copy);

          NodeDef identity_node;
          identity_node.set_op("Identity");
          identity_node.set_name(original_name);
          *(identity_node.mutable_input()->Add()) = original_copy.name();
          new_nodes->push_back(identity_node);

          return OkStatus();
        },
        {}, &replaced_graph_def));

    EXPECT_EQ(10, replaced_graph_def.node_size());
    for (const NodeDef& node : replaced_graph_def.node()) {
      if (node.name() == "output") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("output_before_identity", node.input(0));
      } else if (node.name() == "output_before_identity") {
        EXPECT_EQ("Mul", node.op());
        EXPECT_EQ("add", node.input(0));
        EXPECT_EQ("placeholder", node.input(1));
      } else if (node.name() == "placeholder") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("placeholder_before_identity", node.input(0));
      } else if (node.name() == "placeholder_before_identity") {
        EXPECT_EQ("Placeholder", node.op());
      } else if (node.name() == "add") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("add_before_identity", node.input(0));
      } else if (node.name() == "add_before_identity") {
        EXPECT_EQ("Add", node.op());
        EXPECT_EQ("a", node.input(0));
        EXPECT_EQ("b", node.input(1));
      } else if (node.name() == "a") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("a_before_identity", node.input(0));
      } else if (node.name() == "a_before_identity") {
        EXPECT_EQ("Const", node.op());
      } else if (node.name() == "b") {
        EXPECT_EQ("Identity", node.op());
        EXPECT_EQ("b_before_identity", node.input(0));
      } else if (node.name() == "b_before_identity") {
        EXPECT_EQ("Const", node.op());
      } else {
        EXPECT_EQ(true, false) << "Unexpected node name found: "
                               << node.name();
      }
    }
  }

  // Checks MatchedNodesAsArray flattens a match tree in pre-order with
  // duplicates removed.
  void TestMatchedNodesAsArray() {
    NodeMatch fourth;
    fourth.node.set_name("fourth");
    NodeMatch second;
    second.node.set_name("second");
    second.inputs.push_back(fourth);
    NodeMatch third;
    third.node.set_name("third");
third.inputs.push_back(fourth);
    NodeMatch first;
    first.node.set_name("first");
    first.inputs.push_back(second);
    first.inputs.push_back(third);

    std::vector<NodeDef> result;
    MatchedNodesAsArray(first, &result);

    EXPECT_EQ(4, result.size());
    EXPECT_EQ("first", result[0].name());
    EXPECT_EQ("second", result[1].name());
    EXPECT_EQ("third", result[2].name());
    EXPECT_EQ("fourth", result[3].name());
  }

  // Checks a simple single-step input rename.
  void TestRenameNodeInputs() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, a_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef renamed_graph_def;
    TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}},
                                  std::unordered_set<string>(),
                                  &renamed_graph_def));

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(renamed_graph_def, &node_map);
    EXPECT_EQ("b", node_map.at("add")->input(0));
    EXPECT_EQ("b", node_map.at("add")->input(1));
  }

  // Checks renames are followed transitively (a->f->e->d->c).
  void TestRenameNodeInputsWithRedirects() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Tensor c_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&c_data, 1.0f);
    Output c_const =
        Const(root.WithOpName("c"), Input::Initializer(c_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef renamed_graph_def;
    TF_ASSERT_OK(RenameNodeInputs(
        graph_def, {{"a", "f"}, {"f", "e"}, {"e", "d"}, {"d", "c"}},
        std::unordered_set<string>(), &renamed_graph_def));

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(renamed_graph_def, &node_map);
    EXPECT_EQ("c", node_map.at("add")->input(0));
    EXPECT_EQ("b", node_map.at("add")->input(1));
  }

  // Checks a cyclic rename map (a->d, d->a) is rejected.
  void TestRenameNodeInputsWithCycle() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Tensor c_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&c_data, 1.0f);
    Output c_const = Const(root.WithOpName("c"), Input::Initializer(c_data));

    Output add = Add(root.WithOpName("add"), a_const, b_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("output"), add, placeholder);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef renamed_graph_def;
    Status rename_status =
        RenameNodeInputs(graph_def, {{"a", "d"}, {"d", "a"}},
                         std::unordered_set<string>(), &renamed_graph_def);
    EXPECT_FALSE(rename_status.ok());
  }

  // Checks the ":*" wildcard renames every output port while keeping the
  // port suffixes intact.
  void TestRenameNodeInputsWithWildcard() {
    auto root = tensorflow::Scope::DisabledShapeInferenceScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
    QuantizeV2 quantize_a(root.WithOpName("quantize_a"), a_const, a_const,
                          a_const, DT_QUINT8,
                          QuantizeV2::Attrs().Mode("MIN_FIRST"));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
    QuantizeV2 quantize_b(root.WithOpName("quantize_b"), b_const, b_const,
                          b_const, DT_QUINT8,
                          QuantizeV2::Attrs().Mode("MIN_FIRST"));

    Output add = Add(root.WithOpName("add"), quantize_a.output_min,
                     quantize_a.output_max);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef renamed_graph_def;
    TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"quantize_a:*", "quantize_b"}},
                                  std::unordered_set<string>(),
                                  &renamed_graph_def));

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(renamed_graph_def, &node_map);
    EXPECT_EQ("quantize_b:1", node_map.at("add")->input(0));
    EXPECT_EQ("quantize_b:2", node_map.at("add")->input(1));
  }

  // Checks nodes in the ignore set keep their original inputs.
  void TestRenameNodeInputsWithIgnores() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 10;

    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));

    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));

    Output add = Add(root.WithOpName("add"), a_const, a_const);

    Output add2 = Add(root.WithOpName("add2"), a_const, a_const);

    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);

    Output mul = Mul(root.WithOpName("mul"), add, placeholder);

    Output mul2 = Mul(root.WithOpName("output"), mul, add2);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    GraphDef renamed_graph_def;
    TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}}, {"add2"},
                                  &renamed_graph_def));

    std::map<string, const NodeDef*>
        node_map;
    MapNamesToNodes(renamed_graph_def, &node_map);
    EXPECT_EQ("b", node_map.at("add")->input(0));
    EXPECT_EQ("b", node_map.at("add")->input(1));
    EXPECT_EQ("a", node_map.at("add2")->input(0));
    EXPECT_EQ("a", node_map.at("add2")->input(1));
  }

  // Checks FindInvalidInputs reports every edge to a nonexistent node
  // (control inputs and port suffixes included).
  void TestFindInvalidInputs() {
    GraphDef graph_def;

    NodeDef* mul_node = graph_def.mutable_node()->Add();
    mul_node->set_op("Mul");
    mul_node->set_name("mul_node");
    *(mul_node->mutable_input()->Add()) = "add_node1";
    *(mul_node->mutable_input()->Add()) = "add_node2:0";
    *(mul_node->mutable_input()->Add()) = "^const_node1:0";

    NodeDef* add_node1 = graph_def.mutable_node()->Add();
    add_node1->set_op("Add");
    add_node1->set_name("add_node1");
    *(add_node1->mutable_input()->Add()) = "missing_input1";
    *(add_node1->mutable_input()->Add()) = "const_node1:0";
    *(add_node1->mutable_input()->Add()) = "missing_input2";

    NodeDef* add_node2 = graph_def.mutable_node()->Add();
    add_node2->set_op("Add");
    add_node2->set_name("add_node2");
    *(add_node2->mutable_input()->Add()) = "missing_input3";
    *(add_node2->mutable_input()->Add()) = "const_node1:0";
    *(add_node2->mutable_input()->Add()) = "^const_node2";

    NodeDef* const_node1 = graph_def.mutable_node()->Add();
    const_node1->set_op("Const");
    const_node1->set_name("const_node1");

    NodeDef* const_node2 = graph_def.mutable_node()->Add();
    const_node2->set_op("Const");
    const_node2->set_name("const_node2");

    std::vector<std::pair<string, string>> invalid_inputs;
    FindInvalidInputs(graph_def, &invalid_inputs);
    EXPECT_EQ(3, invalid_inputs.size());
    for (const std::pair<string, string>& invalid_input : invalid_inputs) {
      EXPECT_TRUE((invalid_input.first == "add_node1") ||
                  (invalid_input.first == "add_node2"));
      if (invalid_input.first == "add_node1") {
        EXPECT_TRUE((invalid_input.second == "missing_input1") ||
                    (invalid_input.second == "missing_input2"))
            << invalid_input.second;
      } else if (invalid_input.first == "add_node2") {
        EXPECT_EQ("missing_input3", invalid_input.second);
      }
    }
  }

  // Checks IsGraphValid fails on dangling inputs and passes on a clean
  // graph.
  void TestIsGraphValid() {
    GraphDef
invalid_graph_def;

    NodeDef* mul_node = invalid_graph_def.mutable_node()->Add();
    mul_node->set_op("Mul");
    mul_node->set_name("mul_node");
    *(mul_node->mutable_input()->Add()) = "add_node1";
    *(mul_node->mutable_input()->Add()) = "add_node2:0";
    *(mul_node->mutable_input()->Add()) = "^const_node1:0";

    NodeDef* add_node1 = invalid_graph_def.mutable_node()->Add();
    add_node1->set_op("Add");
    add_node1->set_name("add_node1");
    *(add_node1->mutable_input()->Add()) = "missing_input1";
    *(add_node1->mutable_input()->Add()) = "const_node1:0";
    *(add_node1->mutable_input()->Add()) = "missing_input2";

    NodeDef* add_node2 = invalid_graph_def.mutable_node()->Add();
    add_node2->set_op("Add");
    add_node2->set_name("add_node2");
    *(add_node2->mutable_input()->Add()) = "missing_input3";
    *(add_node2->mutable_input()->Add()) = "const_node1:0";
    *(add_node2->mutable_input()->Add()) = "^const_node2";

    NodeDef* const_node1 = invalid_graph_def.mutable_node()->Add();
    const_node1->set_op("Const");
    const_node1->set_name("const_node1");

    NodeDef* const_node2 = invalid_graph_def.mutable_node()->Add();
    const_node2->set_op("Const");
    const_node2->set_name("const_node2");

    EXPECT_FALSE(IsGraphValid(invalid_graph_def).ok());

    GraphDef valid_graph_def;

    NodeDef* const_node3 = valid_graph_def.mutable_node()->Add();
    const_node3->set_op("Const");
    const_node3->set_name("const_node2");

    EXPECT_TRUE(IsGraphValid(valid_graph_def).ok());
  }

  // Checks GetInOutTypes resolves input/output dtypes for Const and Relu
  // nodes of both float and int32 flavors.
  void TestGetInOutTypes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;

    const int width = 20;

    Tensor float_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&float_data, 1.0f);
    Output float_const =
        Const(root.WithOpName("float_const"), Input::Initializer(float_data));

    Tensor int_data(DT_INT32, TensorShape({width}));
    test::FillIota<int32>(&int_data, 1);
    Output int_const =
        Const(root.WithOpName("int_const"), Input::Initializer(int_data));

    Output float_relu = Relu(root.WithOpName("float_relu"), float_const);

    Output int_relu =
        Relu(root.WithOpName("int_relu"), int_const);

    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));

    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(graph_def, &node_map);

    const NodeDef* float_const_def = node_map.at("float_const");
    DataTypeVector float_const_inputs;
    DataTypeVector float_const_outputs;
    TF_EXPECT_OK(GetInOutTypes(*float_const_def, &float_const_inputs,
                               &float_const_outputs));
    ASSERT_EQ(0, float_const_inputs.size());
    ASSERT_EQ(1, float_const_outputs.size());
    EXPECT_EQ(DT_FLOAT, float_const_outputs[0]);

    const NodeDef* int_const_def = node_map.at("int_const");
    DataTypeVector int_const_inputs;
    DataTypeVector int_const_outputs;
    TF_EXPECT_OK(
        GetInOutTypes(*int_const_def, &int_const_inputs, &int_const_outputs));
    ASSERT_EQ(0, int_const_inputs.size());
    ASSERT_EQ(1, int_const_outputs.size());
    EXPECT_EQ(DT_INT32, int_const_outputs[0]);

    const NodeDef* float_relu_def = node_map.at("float_relu");
    DataTypeVector float_relu_inputs;
    DataTypeVector float_relu_outputs;
    TF_EXPECT_OK(GetInOutTypes(*float_relu_def, &float_relu_inputs,
                               &float_relu_outputs));
    ASSERT_EQ(1, float_relu_inputs.size());
    EXPECT_EQ(DT_FLOAT, float_relu_inputs[0]);
    ASSERT_EQ(1, float_relu_outputs.size());
    EXPECT_EQ(DT_FLOAT, float_relu_outputs[0]);

    const NodeDef* int_relu_def = node_map.at("int_relu");
    DataTypeVector int_relu_inputs;
    DataTypeVector int_relu_outputs;
    TF_EXPECT_OK(
        GetInOutTypes(*int_relu_def, &int_relu_inputs, &int_relu_outputs));
    ASSERT_EQ(1, int_relu_inputs.size());
    EXPECT_EQ(DT_INT32, int_relu_inputs[0]);
    ASSERT_EQ(1, int_relu_outputs.size());
    EXPECT_EQ(DT_INT32, int_relu_outputs[0]);
  }

  // Checks CopyOriginalMatch flattens a two-node match into a vector.
  void TestCopyOriginalMatch() {
    NodeDef a;
    a.set_op("Relu");
    a.set_name("a");
    AddNodeInput("b", &a);

    NodeDef b;
    b.set_op("Const");
    b.set_name("b");

    NodeMatch b_match;
    b_match.node = b;

    NodeMatch a_match;
    a_match.node = a;
    a_match.inputs.push_back(b_match);

    std::vector<NodeDef> new_nodes;
    CopyOriginalMatch(a_match, &new_nodes);
    EXPECT_EQ(2, new_nodes.size());
    EXPECT_EQ("a",
              new_nodes[0].name());
    EXPECT_EQ("Relu", new_nodes[0].op());
    EXPECT_EQ("b", new_nodes[1].name());
    EXPECT_EQ("Const", new_nodes[1].op());
  }

  // Checks HashNodeDef: equal for identical nodes, different whenever the
  // values, name, dtype, shape, device, or inputs differ.
  void TestHashNodeDef() {
    using namespace ::tensorflow::ops;

    const int width = 10;

    auto a_root = tensorflow::Scope::NewRootScope();
    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(a_root.WithOpName("a"), Input::Initializer(a_data));
    GraphDef a_graph_def;
    TF_ASSERT_OK(a_root.ToGraphDef(&a_graph_def));
    const NodeDef& a_node_def = a_graph_def.node(0);

    auto b_root = tensorflow::Scope::NewRootScope();
    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const = Const(b_root.WithOpName("a"), Input::Initializer(b_data));
    GraphDef b_graph_def;
    TF_ASSERT_OK(b_root.ToGraphDef(&b_graph_def));
    const NodeDef& b_node_def = b_graph_def.node(0);

    auto c_root = tensorflow::Scope::NewRootScope();
    Tensor c_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&c_data, 2.0f);
    Output c_const = Const(c_root.WithOpName("a"), Input::Initializer(c_data));
    GraphDef c_graph_def;
    TF_ASSERT_OK(c_root.ToGraphDef(&c_graph_def));
    const NodeDef& c_node_def = c_graph_def.node(0);

    auto d_root = tensorflow::Scope::NewRootScope();
    Tensor d_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&d_data, 1.0f);
    Output d_const = Const(d_root.WithOpName("d"), Input::Initializer(d_data));
    GraphDef d_graph_def;
    TF_ASSERT_OK(d_root.ToGraphDef(&d_graph_def));
    const NodeDef& d_node_def = d_graph_def.node(0);

    auto e_root = tensorflow::Scope::NewRootScope();
    Tensor e_data(DT_INT32, TensorShape({width}));
    test::FillIota<int32>(&e_data, 1);
    Output e_const = Const(e_root.WithOpName("a"), Input::Initializer(e_data));
    GraphDef e_graph_def;
    TF_ASSERT_OK(e_root.ToGraphDef(&e_graph_def));
    const NodeDef& e_node_def = e_graph_def.node(0);

    auto f_root = tensorflow::Scope::NewRootScope();
    Tensor f_data(DT_FLOAT, TensorShape({width - 1}));
    test::FillIota<float>(&f_data, 1.0f);
    Output f_const = Const(f_root.WithOpName("a"), Input::Initializer(f_data));
    GraphDef f_graph_def;
    TF_ASSERT_OK(f_root.ToGraphDef(&f_graph_def));
    const NodeDef& f_node_def = f_graph_def.node(0);

    auto g_root = tensorflow::Scope::NewRootScope();
    Tensor g_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&g_data, 1);
    Output g_const = Const(g_root.WithOpName("a").WithDevice("some_device"),
                           Input::Initializer(g_data));
    GraphDef g_graph_def;
    TF_ASSERT_OK(g_root.ToGraphDef(&g_graph_def));
    const NodeDef& g_node_def = g_graph_def.node(0);

    NodeDef relu1_node_def;
    relu1_node_def.set_op("Relu");
    relu1_node_def.set_name("a");
    relu1_node_def.add_input("foo");

    NodeDef relu2_node_def;
    relu2_node_def.set_op("Relu");
    relu2_node_def.set_name("a");
    relu2_node_def.add_input("bar");

    EXPECT_EQ(HashNodeDef(a_node_def), HashNodeDef(b_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(c_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(d_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(e_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(f_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(g_node_def));
    EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(relu1_node_def));
    EXPECT_NE(HashNodeDef(relu1_node_def), HashNodeDef(relu2_node_def));
  }

  // Checks CountParameters for present and absent parameter names.
  void TestCountParameters() {
    TransformFuncContext context;
    context.params.insert({"foo", {"a", "b"}});
    context.params.insert({"bar", {"c"}});
    EXPECT_EQ(2, context.CountParameters("foo"));
    EXPECT_EQ(1, context.CountParameters("bar"));
    EXPECT_EQ(0, context.CountParameters("not_present"));
  }

  // Checks GetOneStringParameter: single value, multiple values (error),
  // and the default for an absent name.
  void TestGetOneStringParameter() {
    TransformFuncContext context;
    context.params.insert({"foo", {"a", "b"}});
    context.params.insert({"bar", {"c"}});
    string value;
    TF_EXPECT_OK(context.GetOneStringParameter("bar", "d", &value));
    EXPECT_EQ("c", value);
    EXPECT_FALSE(context.GetOneStringParameter("foo", "d", &value).ok());
    TF_EXPECT_OK(context.GetOneStringParameter("not_present", "d", &value));
    EXPECT_EQ("d", value);
  }

  // Checks GetOneInt32Parameter parsing, defaults, and error cases.
  void
  TestGetOneInt32Parameter() {
    TransformFuncContext context;
    context.params.insert({"foo", {"10", "20"}});
    context.params.insert({"bar", {"-23"}});
    context.params.insert({"not_a_number", {"not_numerical"}});
    context.params.insert({"float", {"-23.232323"}});
    int32_t value;
    TF_EXPECT_OK(context.GetOneInt32Parameter("bar", 0, &value));
    EXPECT_EQ(-23, value);
    EXPECT_FALSE(context.GetOneInt32Parameter("foo", 0, &value).ok());
    TF_EXPECT_OK(context.GetOneInt32Parameter("not_present", 10, &value));
    EXPECT_EQ(10, value);
    EXPECT_FALSE(context.GetOneInt32Parameter("not_a_number", 0, &value).ok());
    EXPECT_FALSE(context.GetOneInt32Parameter("float", 0, &value).ok());
  }

  // Checks GetOneInt64Parameter parsing, defaults, and error cases.
  void TestGetOneInt64Parameter() {
    TransformFuncContext context;
    context.params.insert({"foo", {"10", "20"}});
    context.params.insert({"bar", {"-23"}});
    context.params.insert({"not_a_number", {"not_numerical"}});
    context.params.insert({"float", {"-23.232323"}});
    int64_t value;
    TF_EXPECT_OK(context.GetOneInt64Parameter("bar", 0, &value));
    EXPECT_EQ(-23, value);
    EXPECT_FALSE(context.GetOneInt64Parameter("foo", 0, &value).ok());
    TF_EXPECT_OK(context.GetOneInt64Parameter("not_present", 10, &value));
    EXPECT_EQ(10, value);
    EXPECT_FALSE(context.GetOneInt64Parameter("not_a_number", 0, &value).ok());
    EXPECT_FALSE(context.GetOneInt64Parameter("float", 0, &value).ok());
  }

  // Checks GetOneFloatParameter parsing, defaults, and error cases.
  void TestGetOneFloatParameter() {
    TransformFuncContext context;
    context.params.insert({"foo", {"10.0", "20.0"}});
    context.params.insert({"bar", {"-23.2323"}});
    context.params.insert({"not_a_number", {"not_numerical"}});
    float value;
    TF_EXPECT_OK(context.GetOneFloatParameter("bar", 0, &value));
    EXPECT_NEAR(-23.2323f, value, 1e-5f);
    EXPECT_FALSE(context.GetOneFloatParameter("foo", 0, &value).ok());
    TF_EXPECT_OK(context.GetOneFloatParameter("not_present", 10.5f, &value));
    EXPECT_NEAR(10.5f, value, 1e-5f);
    EXPECT_FALSE(context.GetOneFloatParameter("not_a_number", 0, &value).ok());
  }

  // Checks GetOneBoolParameter (method continues past this chunk).
  void TestGetOneBoolParameter() {
    TransformFuncContext context;
context.params.insert({"foo", {"true", "false"}}); context.params.insert({"true", {"true"}}); context.params.insert({"false", {"false"}}); context.params.insert({"one", {"1"}}); context.params.insert({"zero", {"0"}}); context.params.insert({"not_a_bool", {"not_boolean"}}); bool value; EXPECT_FALSE(context.GetOneBoolParameter("foo", 0, &value).ok()); value = false; TF_EXPECT_OK(context.GetOneBoolParameter("true", false, &value)); EXPECT_TRUE(value); value = true; TF_EXPECT_OK(context.GetOneBoolParameter("false", true, &value)); EXPECT_FALSE(value); value = false; TF_EXPECT_OK(context.GetOneBoolParameter("one", false, &value)); EXPECT_TRUE(value); value = true; TF_EXPECT_OK(context.GetOneBoolParameter("zero", true, &value)); EXPECT_FALSE(value); EXPECT_FALSE(context.GetOneBoolParameter("not_a_bool", false, &value).ok()); value = false; TF_EXPECT_OK(context.GetOneBoolParameter("not_present", true, &value)); EXPECT_TRUE(value); } }; TEST_F(TransformUtilsTest, TestMapNamesToNodes) { TestMapNamesToNodes(); } TEST_F(TransformUtilsTest, TestMapNodesToOutputs) { TestMapNodesToOutputs(); } TEST_F(TransformUtilsTest, TestNodeNamePartsFromInput) { TestNodeNamePartsFromInput(); } TEST_F(TransformUtilsTest, TestCanonicalInputName) { TestCanonicalInputName(); } TEST_F(TransformUtilsTest, TestAddNodeInput) { TestAddNodeInput(); } TEST_F(TransformUtilsTest, TestCopyNodeAttr) { TestCopyNodeAttr(); } TEST_F(TransformUtilsTest, TestSetNodeAttr) { TestSetNodeAttr(); } TEST_F(TransformUtilsTest, TestSetNodeTensorAttr) { TestSetNodeTensorAttr(); } TEST_F(TransformUtilsTest, TestSetNodeTensorAttrWithTensor) { TestSetNodeTensorAttrWithTensor(); } TEST_F(TransformUtilsTest, TestGetNodeTensorAttr) { TestGetNodeTensorAttr(); } TEST_F(TransformUtilsTest, TestNodeNameFromInput) { TestNodeNameFromInput(); } TEST_F(TransformUtilsTest, TestFilterGraphDef) { TestFilterGraphDef(); } TEST_F(TransformUtilsTest, TestRemoveAttributes) { TestRemoveAttributes(); } TEST_F(TransformUtilsTest, 
TestGetOpTypeMatches) { TestGetOpTypeMatches(); } TEST_F(TransformUtilsTest, TestGetOpTypeMatchesDAG) { TestGetOpTypeMatchesDAG(); } TEST_F(TransformUtilsTest, TestReplaceMatchingOpTypes) { TestReplaceMatchingOpTypes(); } TEST_F(TransformUtilsTest, TestMatchedNodesAsArray) { TestMatchedNodesAsArray(); } TEST_F(TransformUtilsTest, TestRenameNodeInputs) { TestRenameNodeInputs(); } TEST_F(TransformUtilsTest, TestRenameNodeInputsWithRedirects) { TestRenameNodeInputsWithRedirects(); } TEST_F(TransformUtilsTest, TestRenameNodeInputsWithCycle) { TestRenameNodeInputsWithCycle(); } TEST_F(TransformUtilsTest, TestRenameNodeInputsWithWildcard) { TestRenameNodeInputsWithWildcard(); } TEST_F(TransformUtilsTest, TestRenameNodeInputsWithIgnores) { TestRenameNodeInputsWithIgnores(); } TEST_F(TransformUtilsTest, TestFindInvalidInputs) { TestFindInvalidInputs(); } TEST_F(TransformUtilsTest, TestIsGraphValid) { TestIsGraphValid(); } TEST_F(TransformUtilsTest, TestGetInOutTypes) { TestGetInOutTypes(); } TEST_F(TransformUtilsTest, TestCopyOriginalMatch) { TestCopyOriginalMatch(); } TEST_F(TransformUtilsTest, TestHashNodeDef) { TestHashNodeDef(); } TEST_F(TransformUtilsTest, TestCountParameters) { TestCountParameters(); } TEST_F(TransformUtilsTest, TestGetOneStringParameter) { TestGetOneStringParameter(); } TEST_F(TransformUtilsTest, TestGetOneInt32Parameter) { TestGetOneInt32Parameter(); } TEST_F(TransformUtilsTest, TestGetOneInt64Parameter) { TestGetOneInt64Parameter(); } TEST_F(TransformUtilsTest, TestGetOneFloatParameter) { TestGetOneFloatParameter(); } TEST_F(TransformUtilsTest, TestGetOneBoolParameter) { TestGetOneBoolParameter(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d7587362-ad42-4b12-b651-20d6bd9f85a2
cpp
tensorflow/tensorflow
rename_node
tensorflow/tools/graph_transforms/rename_node.cc
tensorflow/tools/graph_transforms/rename_node_test.cc
#include <string> #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameNode(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { if (!context.params.count("old_node_name") || (context.params.at("old_node_name").size() != 1) || !context.params.count("new_node_name") || (context.params.at("new_node_name").size() != 1)) { return errors::InvalidArgument( "rename_node expects exactly one 'old_node_name' and one " "'new_node_name' argument, e.g. " "rename_node(old_attribute_name=super/deep/output, " "new_attribute_name=output)"); } const std::string old_node_name = context.params.at("old_node_name")[0]; const std::string new_node_name = context.params.at("new_node_name")[0]; output_graph_def->Clear(); for (const NodeDef& input_node : input_graph_def.node()) { NodeDef* node = output_graph_def->mutable_node()->Add(); *node = input_node; if (node->name() == new_node_name) { return Status(absl::StatusCode::kInvalidArgument, "A node is alreading using " + new_node_name + "as name."); } if (node->name() == old_node_name) { node->set_name(new_node_name); } for (std::string& input_name : *node->mutable_input()) { std::string prefix; std::string input_node_name; std::string suffix; NodeNamePartsFromInput(input_name, &prefix, &input_node_name, &suffix); if (input_node_name == old_node_name) { std::string new_input_name = prefix + new_node_name + suffix; input_name = new_input_name; } } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("rename_node", RenameNode); } }
#include <string> #include <utility> #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameNode(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); TEST(RenameNodeTest, Rename) { GraphDef in_graph; NodeDef* node = in_graph.add_node(); node->set_name("input"); node->set_op("Placeholder"); NodeDef* node_splitter = in_graph.add_node(); node_splitter->set_name("splitter"); node_splitter->set_op("Split"); NodeDef* node_adder = in_graph.add_node(); node_adder->set_op("Add"); node_adder->set_name("adder"); node_adder->add_input("splitter"); node_adder->add_input("splitter:1"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"adder"}; context.params.insert(std::pair<string, std::vector<string>>( {"old_node_name", {std::string("splitter")}})); context.params.insert(std::pair<string, std::vector<string>>( {"new_node_name", {string("demux")}})); TF_ASSERT_OK(RenameNode(in_graph, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("demux")); EXPECT_EQ(1, node_lookup.count("adder")); EXPECT_EQ(2, node_lookup["adder"]->input().size()); EXPECT_EQ("demux", node_lookup["adder"]->input()[0]); EXPECT_EQ("demux:1", node_lookup["adder"]->input()[1]); } TEST(RenameNodeTest, FailWhenNameAlreadyExists) { GraphDef in_graph; NodeDef* node = in_graph.add_node(); node->set_name("input"); node->set_op("Placeholder"); NodeDef* node_splitter = in_graph.add_node(); node_splitter->set_name("splitter"); node_splitter->set_op("Split"); NodeDef* node_adder = in_graph.add_node(); node_adder->set_op("Add"); node_adder->set_name("adder"); 
node_adder->add_input("splitter"); node_adder->add_input("splitter:1"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"adder"}; context.params.insert(std::pair<string, std::vector<string>>( {"old_node_name", {std::string("splitter")}})); context.params.insert(std::pair<string, std::vector<string>>( {"new_node_name", {string("adder")}})); EXPECT_FALSE(RenameNode(in_graph, context, &result).ok()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_node.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_node_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c2ba4f36-53a8-4ce4-bbf5-d1fb8053459d
cpp
tensorflow/tensorflow
rename_attribute
tensorflow/tools/graph_transforms/rename_attribute.cc
tensorflow/tools/graph_transforms/rename_attribute_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameAttribute(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { if (!context.params.count("old_attribute_name") || (context.params.at("old_attribute_name").size() != 1) || !context.params.count("new_attribute_name") || (context.params.at("new_attribute_name").size() != 1)) { return errors::InvalidArgument( "rename_attribute expects exactly one 'old_attribute_name' and one " "'new_attribute_name' argument, e.g. " "rename_attribute(old_attribute_name=foo, new_attribute_name=bar)"); } string op_name; if (context.params.count("op_name")) { op_name = context.params.at("op_name")[0]; } else { op_name = "*"; } const string old_attribute_name = context.params.at("old_attribute_name")[0]; const string new_attribute_name = context.params.at("new_attribute_name")[0]; output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; if (((op_name == "*") || (op_name == node.op())) && (node.attr().count(old_attribute_name))) { AttrValue attribute_value = node.attr().at(old_attribute_name); new_node->mutable_attr()->erase(old_attribute_name); new_node->mutable_attr()->insert({new_attribute_name, attribute_value}); } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("rename_attribute", RenameAttribute); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameAttribute(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RenameAttributeTest : public ::testing::Test { protected: void TestRenameAttribute() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); AddNodeAttr("foo", 23, mul_node1); AddNodeAttr("bar", "something", mul_node1); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); AddNodeAttr("foo", 46, add_node2); AddNodeAttr("bob", 23, add_node2); AddNodeAttr("bar", "something else", add_node2); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); 
add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef wildcard_result; TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; context.params.insert( std::pair<string, std::vector<string>>({"op_name", {string("*")}})); context.params.insert(std::pair<string, std::vector<string>>( {"old_attribute_name", {string("foo")}})); context.params.insert(std::pair<string, std::vector<string>>( {"new_attribute_name", {string("baz")}})); TF_ASSERT_OK(RenameAttribute(graph_def, context, &wildcard_result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(wildcard_result, &node_lookup); EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("baz")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar")); EXPECT_EQ(0, node_lookup.at("add_node2")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("baz")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob")); GraphDef targeted_result; TransformFuncContext targeted_context; targeted_context.input_names = {}; targeted_context.output_names = {"mul_node1"}; targeted_context.params.insert( std::pair<string, std::vector<string>>({"op_name", {string("Mul")}})); targeted_context.params.insert(std::pair<string, std::vector<string>>( {"old_attribute_name", {string("foo")}})); targeted_context.params.insert(std::pair<string, std::vector<string>>( {"new_attribute_name", {string("baz")}})); TF_ASSERT_OK( RenameAttribute(graph_def, targeted_context, &targeted_result)); MapNamesToNodes(targeted_result, &node_lookup); EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("baz")); EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("foo")); EXPECT_EQ(0, 
node_lookup.at("add_node2")->attr().count("baz")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar")); EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob")); } }; TEST_F(RenameAttributeTest, TestRenameAttribute) { TestRenameAttribute(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_attribute.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_attribute_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a035bbb0-9445-464c-9174-e02ca7f9ceaa
cpp
tensorflow/tensorflow
quantize_weights
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights_test.cc
#include <memory> #include <optional> #include <utility> #include "llvm/Support/Casting.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/Quant/IR/Quant.h" #include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OperationSupport.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/Value.h" #include "mlir/IR/Verifier.h" #include "mlir/IR/Visitors.h" #include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Rewrite/FrozenRewritePatternSet.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Support/TypeID.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { namespace { class QuantizeWeightsPass : public mlir::PassWrapper<QuantizeWeightsPass, OperationPass<ModuleOp>> { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizeWeightsPass) explicit QuantizeWeightsPass() : test_mode_(true) { initializeForTest(); } explicit QuantizeWeightsPass( const tensorflow::quantization::QuantizationOptions& quant_options) : test_mode_(false), quant_options_(quant_options) {} QuantizeWeightsPass(const QuantizeWeightsPass& other) { test_mode_ = other.test_mode_; quant_options_ = other.quant_options_; initializeForTest(); } StringRef getArgument() const final { return 
"quant-quantize-weights"; } StringRef getDescription() const final { return "Quantize weights used by quantizable ops."; } void getDependentDialects(DialectRegistry& registry) const override { registry.insert<TF::TensorFlowDialect, quant::QuantDialect>(); } private: void runOnOperation() override; bool test_mode_; tensorflow::quantization::QuantizationOptions quant_options_; void initializeForTest() { if (!test_mode_) return; tensorflow::quantization::QuantizationComponentSpec quant_spec; quant_spec.set_quantization_component( tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT); quant_spec.set_tensor_type( tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8); auto mutable_quant_method = quant_options_.mutable_quantization_method(); *mutable_quant_method->add_quantization_component_specs() = quant_spec; } }; class QuantizeConstWeights : public OpRewritePattern<TF::ConstOp> { public: explicit QuantizeConstWeights( MLIRContext* context, const tensorflow::quantization::QuantizationOptions& quantization_options) : OpRewritePattern<TF::ConstOp>(context), quant_options_(quantization_options) {} LogicalResult matchAndRewrite(TF::ConstOp op, PatternRewriter& rewriter) const override { auto weight_component_spec = GetWeightComponentSpec(quant_options_); if (!weight_component_spec) return failure(); if (failed((isQuantizableWeight(op)))) { return failure(); } if (failed(quantizeOps(rewriter, op, weight_component_spec.value()))) { return failure(); } return success(); } private: bool checkIfAnyUserIsConnectedToTermiantor(BlockArgument op) const { for (const auto& user : op.getUsers()) { if (user->template hasTrait<OpTrait::IsTerminator>()) return true; if (auto next_user = dyn_cast_or_null<TF::IdentityOp>(user)) { return (*(next_user->getResult(0).getUsers().begin())) ->template hasTrait<OpTrait::IsTerminator>(); } } return false; } bool hasUsageFromQuantizableOp(TF::ConstOp op) const { llvm::SmallVector<mlir::Value> 
uses_at_current_level{op}; while (!uses_at_current_level.empty()) { llvm::SmallVector<mlir::Value> next_values_to_visit; for (auto cur_op : uses_at_current_level) { for (auto& cur_op_use : cur_op.getUses()) { Operation* next_op = cur_op_use.getOwner(); int next_op_operand_num = cur_op_use.getOperandNumber(); if (auto call_op = llvm::dyn_cast<mlir::CallOpInterface>(next_op)) { mlir::func::FuncOp func = llvm::dyn_cast<mlir::func::FuncOp>(call_op.resolveCallable()); if (!func) continue; next_values_to_visit.push_back( func.getArgument(next_op_operand_num)); } else if (auto while_op = llvm::dyn_cast_or_null<TF::WhileOp>(next_op)) { func::FuncOp func = while_op.body_function(); auto func_argument = func.getArgument(next_op_operand_num); if (checkIfAnyUserIsConnectedToTermiantor(func_argument)) next_values_to_visit.push_back( func.getArgument(next_op_operand_num)); } else if (IsOpWithQuantizableTrait(next_op)) { return true; } else if (IsOpWithDataMovementTrait(next_op)) { next_values_to_visit.insert(next_values_to_visit.end(), next_op->getResults().begin(), next_op->getResults().end()); } } } uses_at_current_level.swap(next_values_to_visit); } return false; } LogicalResult isQuantizableWeight(TF::ConstOp op) const { if (!IsValueWithQuantizablePrecision(op)) return failure(); if (!hasUsageFromQuantizableOp(op)) return failure(); int num_elements_threshold = quant_options_.min_num_elements_for_weights(); int num_elements = cast<ShapedType>(op.getType()).getNumElements(); if (num_elements < num_elements_threshold) { op->emitRemark("Quantization is skipped because the op has ") << num_elements << " elements which is fewer than the threshold(" << num_elements_threshold << " elements)."; return failure(); } return success(); } LogicalResult quantizeOps(PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_component_spec) const { if (weight_component_spec.tensor_type() == 
tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8) { auto dequantized_val = ApplyUniformQuantization(rewriter, op, weight_component_spec); if (!dequantized_val.has_value()) return failure(); op.getOutput().replaceAllUsesWith(dequantized_val.value().getResult(0)); return success(); } op->emitRemark("Not supported quantization data type."); return failure(); } protected: tensorflow::quantization::QuantizationOptions quant_options_; }; static PassRegistration<QuantizeWeightsPass> pass; void QuantizeWeightsPass::runOnOperation() { MLIRContext* ctx = &getContext(); auto module_op = getOperation(); RewritePatternSet patterns(ctx); patterns.add<QuantizeConstWeights>(ctx, quant_options_); FrozenRewritePatternSet frozen_patterns(std::move(patterns)); for (auto func : module_op.getOps<func::FuncOp>()) { if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) { func.emitError() << "quant-quantize-weights failed."; signalPassFailure(); } } } } std::unique_ptr<OperationPass<ModuleOp>> CreateQuantizeWeightsPass( const tensorflow::quantization::QuantizationOptions& quant_options) { return std::make_unique<QuantizeWeightsPass>(quant_options); } } }
#include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights.h" #include <cstddef> #include <cstdint> #include <cstdlib> #include <iostream> #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "flatbuffers/flatbuffer_builder.h" #include "flatbuffers/vector.h" #include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h" #include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h" #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" #include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" #include "xla/tsl/util/command_line_flags.h" #include "tsl/platform/init_main.h" #include "tsl/platform/path.h" namespace { std::string* g_test_model_dir = nullptr; } namespace mlir { namespace lite { namespace toco_legacy { namespace { using mlir::TFL::FlatBufferModelAbslError; using tflite::BuiltinOperator_CONV_2D; using tflite::BuiltinOperator_CUSTOM; using tflite::BuiltinOperator_DEQUANTIZE; using tflite::GetModel; using tflite::Model; using tflite::TensorType_FLOAT16; using tflite::TensorType_FLOAT32; using tflite::TensorType_INT8; std::unique_ptr<FlatBufferModelAbslError> ReadTestModel() { auto model_path = tsl::io::JoinPath( *g_test_model_dir, ::mlir::lite::internal::kConvModelWith0Plus10Weights); return FlatBufferModelAbslError::BuildFromFile(model_path.c_str()); } std::unique_ptr<FlatBufferModelAbslError> ReadSharedWeightsTestModel() { auto model_path = tsl::io::JoinPath( *g_test_model_dir, ::mlir::lite::internal::kModelWithSharedWeights); return FlatBufferModelAbslError::BuildFromFile(model_path.c_str()); } std::unique_ptr<FlatBufferModelAbslError> ReadGatherTestModel() { auto model_path = tsl::io::JoinPath( *g_test_model_dir, ::mlir::lite::internal::kQuantizedWithGather); return FlatBufferModelAbslError::BuildFromFile(model_path.c_str()); } std::unique_ptr<FlatBufferModelAbslError> ReadCustomOpTestModel() { auto model_path = tsl::io::JoinPath( *g_test_model_dir, 
::mlir::lite::internal::kModelWithCustomOp); return FlatBufferModelAbslError::BuildFromFile(model_path.c_str()); } template <typename T> std::vector<T> GetAsVector(const flatbuffers::Vector<T>* vec) { return std::vector<T>(vec->begin(), vec->end()); } class QuantizeWeightsTest : public testing::Test { protected: QuantizeWeightsTest() = default; void LoadBasicModel() { input_model_ = ReadTestModel(); model_ = input_model_->GetModel(); } void LoadSharedWeightsModel() { input_model_ = ReadSharedWeightsTestModel(); model_ = input_model_->GetModel(); } void LoadGatherTestModel() { input_model_ = ReadGatherTestModel(); model_ = input_model_->GetModel(); } void LoadCustomOpTestModel() { input_model_ = ReadCustomOpTestModel(); model_ = input_model_->GetModel(); } std::unique_ptr<FlatBufferModelAbslError> input_model_; const Model* model_; bool IsModelInputOrOutput(const Model* model, uint32_t tensor_idx) { for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const auto subgraph = model->subgraphs()->Get(subgraph_idx); for (size_t i = 0; i < subgraph->inputs()->size(); ++i) { if (subgraph->inputs()->Get(i) == tensor_idx) { return true; } } for (size_t i = 0; i < subgraph->outputs()->size(); ++i) { if (subgraph->outputs()->Get(i) == tensor_idx) { return true; } } } return false; } bool GetProducerOpCode(const Model* model, uint32_t subgraph_idx, uint32_t tensor_idx, tflite::BuiltinOperator* op_code) { const auto subgraph = model->subgraphs()->Get(subgraph_idx); for (size_t op_idx = 0; op_idx < subgraph->operators()->size(); ++op_idx) { const auto op = subgraph->operators()->Get(op_idx); for (size_t i = 0; i < op->outputs()->size(); ++i) { if (op->outputs()->Get(i) == tensor_idx) { const uint32_t op_code_idx = op->opcode_index(); *op_code = GetBuiltinCode(model->operator_codes()->Get(op_code_idx)); return true; } } } return false; } }; TEST_F(QuantizeWeightsTest, QuantizationSucceeds) { LoadBasicModel(); flatbuffers::FlatBufferBuilder 
builder; ASSERT_TRUE( QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); } TEST_F(QuantizeWeightsTest, WeightsMinNumElements) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; const uint64_t kWeightsMinNumElements = 1000000; ASSERT_TRUE(QuantizeWeights(&builder, model_, kWeightsMinNumElements, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); subgraph_idx++) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size()); for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) { const auto quant_tensor = quantized_graph->tensors()->Get(i); const auto float_tensor = float_graph->tensors()->Get(i); EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer()); EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable()); EXPECT_EQ(GetAsVector(quant_tensor->shape()), GetAsVector(float_tensor->shape())); EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str()); EXPECT_EQ(quant_tensor->type(), float_tensor->type()); } } } TEST_F(QuantizeWeightsTest, HybridConv) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE( QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); subgraph_idx++) { const auto quantized_graph = 
output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size()); ASSERT_EQ(quantized_graph->operators()->size(), 1); const auto op = quantized_graph->operators()->Get(0); const uint32_t op_code_idx = op->opcode_index(); ASSERT_EQ(GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)), BuiltinOperator_CONV_2D); for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) { const auto quant_tensor = quantized_graph->tensors()->Get(i); const auto float_tensor = float_graph->tensors()->Get(i); EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer()); EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable()); EXPECT_EQ(GetAsVector(quant_tensor->shape()), GetAsVector(float_tensor->shape())); EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str()); if (quant_tensor->name()->str() == "conv_bias") { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (IsModelInputOrOutput(output_model, i)) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->buffer() != 0) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8) << quant_tensor->name()->str(); auto shape = GetAsVector(quant_tensor->shape()); if (kUseUpdatedHybridSchemeDefault) { EXPECT_EQ(quant_tensor->quantization()->scale()->size(), shape[0]); } else { EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 1); } } else { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } } } } TEST_F(QuantizeWeightsTest, DequantizeConv) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE(internal::QuantizeWeights(&builder, model_, 0, false, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < 
model_->subgraphs()->size(); ++subgraph_idx) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size() + 1); int32_t dequant_input_idx = -1; int32_t dequant_output_idx = -1; for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) == BuiltinOperator_DEQUANTIZE) { dequant_input_idx = op->inputs()->Get(0); dequant_output_idx = op->outputs()->Get(0); } } ASSERT_GT(dequant_input_idx, -1); ASSERT_GT(dequant_output_idx, -1); for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) { const auto quant_tensor = quantized_graph->tensors()->Get(i); if (i == dequant_input_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8); } else if (i == dequant_output_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (IsModelInputOrOutput(output_model, i)) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->name()->str() == "conv_bias") { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->buffer() != 0) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8); } else { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } } } } TEST_F(QuantizeWeightsTest, DequantizeConvFloat16) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE(QuantizeWeights(&builder, model_, BufferType::QUANTIZED_FLOAT16, kUseUpdatedHybridSchemeDefault, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const 
auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size() + 2); int32_t dequant_input_idx = -1; int32_t dequant_output_idx = -1; for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) == BuiltinOperator_DEQUANTIZE) { dequant_input_idx = op->inputs()->Get(0); dequant_output_idx = op->outputs()->Get(0); } } ASSERT_GT(dequant_input_idx, -1); ASSERT_GT(dequant_output_idx, -1); for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) { const auto quant_tensor = quantized_graph->tensors()->Get(i); if (i == dequant_input_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16); } else if (i == dequant_output_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (IsModelInputOrOutput(output_model, i)) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->name()->str() == "conv_bias") { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16); } else if (quant_tensor->buffer() != 0) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16); } else { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } } } } TEST_F(QuantizeWeightsTest, SharedWeights_Hybrid) { LoadSharedWeightsModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE( QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); uint32_t num_conv_ops = 0; for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); for 
(size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); const auto op_code = GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)); if (op_code == BuiltinOperator_CONV_2D) { num_conv_ops++; const auto weights_tensor = quantized_graph->tensors()->Get(op->inputs()->Get(1)); EXPECT_EQ(weights_tensor->type(), TensorType_INT8); } } } EXPECT_EQ(num_conv_ops, 2); } TEST_F(QuantizeWeightsTest, SharedWeights_Dequantize) { LoadSharedWeightsModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE(internal::QuantizeWeights(&builder, model_, 0, false, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); uint32_t num_conv_ops = 0; for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); const auto op_code = GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)); if (op_code == BuiltinOperator_CONV_2D) { num_conv_ops++; uint32_t weights_tensor_index = op->inputs()->Get(1); const auto weights_tensor = quantized_graph->tensors()->Get(weights_tensor_index); EXPECT_EQ(weights_tensor->type(), TensorType_FLOAT32); BuiltinOperator producer_op_code; ASSERT_TRUE(GetProducerOpCode(output_model, subgraph_idx, weights_tensor_index, &producer_op_code)); EXPECT_EQ(producer_op_code, BuiltinOperator_DEQUANTIZE); } } } EXPECT_EQ(num_conv_ops, 2); } TEST_F(QuantizeWeightsTest, VerifyGatherQuantization) { LoadGatherTestModel(); flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE( QuantizeWeights(&builder, 
model_, 0, QuantizerType::OLD_QUANTIZER).ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); const auto op_code = GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)); if (op_code == tflite::BuiltinOperator_GATHER) { uint32_t input_tensor_index = op->inputs()->Get(0); const auto weights_tensor = quantized_graph->tensors()->Get(input_tensor_index); EXPECT_EQ(weights_tensor->type(), TensorType_INT8); } } } } TEST_F(QuantizeWeightsTest, VerifyCustomOpQuantizationDequantize) { LoadCustomOpTestModel(); CustomOpMap custom_op_map; custom_op_map["CustomTestOp"] = { .quantizable_input_indices = {1}, .is_hybrid = false, }; flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); const auto quantized_graph = output_model->subgraphs()->Get(0); ASSERT_EQ(quantized_graph->operators()->size(), model_->subgraphs()->Get(0)->operators()->size() + 1); int num_custom_ops_found = 0; for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); const auto op_code = GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)); if (op_code == BuiltinOperator_CUSTOM) { uint32_t weights_tensor_index = 
op->inputs()->Get(1); const auto weights_tensor = quantized_graph->tensors()->Get(weights_tensor_index); EXPECT_EQ(weights_tensor->type(), TensorType_FLOAT32); BuiltinOperator producer_op_code; ASSERT_TRUE(GetProducerOpCode(output_model, 0, weights_tensor_index, &producer_op_code)); EXPECT_EQ(producer_op_code, BuiltinOperator_DEQUANTIZE); num_custom_ops_found++; } } EXPECT_EQ(num_custom_ops_found, 1); } TEST_F(QuantizeWeightsTest, VerifyCustomOpQuantizationHybrid) { LoadCustomOpTestModel(); CustomOpMap custom_op_map; custom_op_map["CustomTestOp"] = { .quantizable_input_indices = {1}, .is_hybrid = true, }; flatbuffers::FlatBufferBuilder builder; ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map, QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); const auto quantized_graph = output_model->subgraphs()->Get(0); ASSERT_EQ(quantized_graph->operators()->size(), model_->subgraphs()->Get(0)->operators()->size()); int num_custom_ops_found = 0; for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); const auto op_code = GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)); if (op_code == BuiltinOperator_CUSTOM) { uint32_t weights_tensor_index = op->inputs()->Get(1); const auto weights_tensor = quantized_graph->tensors()->Get(weights_tensor_index); EXPECT_EQ(weights_tensor->type(), TensorType_INT8); num_custom_ops_found++; } } EXPECT_EQ(num_custom_ops_found, 1); } TEST_F(QuantizeWeightsTest, VerifyUpdatedHybridSchemeFalseQuantizationHybrid) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; const CustomOpMap custom_op_map; ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map, false, {}, QuantizerType::OLD_QUANTIZER) .ok()); const 
uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); subgraph_idx++) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size()); ASSERT_EQ(quantized_graph->operators()->size(), 1); const auto op = quantized_graph->operators()->Get(0); const uint32_t op_code_idx = op->opcode_index(); ASSERT_EQ(GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)), BuiltinOperator_CONV_2D); for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) { const auto quant_tensor = quantized_graph->tensors()->Get(i); const auto float_tensor = float_graph->tensors()->Get(i); EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer()); EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable()); EXPECT_EQ(GetAsVector(quant_tensor->shape()), GetAsVector(float_tensor->shape())); EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str()); if (quant_tensor->name()->str() == "conv_bias") { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (IsModelInputOrOutput(output_model, i)) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->buffer() != 0) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8) << quant_tensor->name()->str(); auto shape = GetAsVector(quant_tensor->shape()); EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 1); } else { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } } } } TEST_F(QuantizeWeightsTest, DequantizeConvBlocklisted) { LoadBasicModel(); flatbuffers::FlatBufferBuilder builder; const CustomOpMap custom_op_map; ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map, true, {BuiltinOperator_CONV_2D}, 
QuantizerType::OLD_QUANTIZER) .ok()); const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); ++subgraph_idx) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx); const auto float_graph = model_->subgraphs()->Get(subgraph_idx); ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size() + 1); int32_t dequant_input_idx = -1; int32_t dequant_output_idx = -1; for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) { const auto op = quantized_graph->operators()->Get(i); const uint32_t op_code_idx = op->opcode_index(); if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) == BuiltinOperator_DEQUANTIZE) { dequant_input_idx = op->inputs()->Get(0); dequant_output_idx = op->outputs()->Get(0); } } ASSERT_GT(dequant_input_idx, -1); ASSERT_GT(dequant_output_idx, -1); for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) { const auto quant_tensor = quantized_graph->tensors()->Get(i); if (i == dequant_input_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8); EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 5); EXPECT_EQ(quant_tensor->quantization()->quantized_dimension(), 0); } else if (i == dequant_output_idx) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (IsModelInputOrOutput(output_model, i)) { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->name()->str() == "conv_bias") { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } else if (quant_tensor->buffer() != 0) { EXPECT_EQ(quant_tensor->type(), TensorType_INT8); } else { EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32); } } } } } } } } int main(int argc, char** argv) { std::string model_file; const std::vector<tsl::Flag> flag_list = { tsl::Flag("test_model_file", 
&model_file, "Path to test tflite model file."), }; const bool parse_result = tsl::Flags::Parse(&argc, argv, flag_list); if (!parse_result) { std::cerr << "Required test_model_file\n"; std::abort(); } g_test_model_dir = new std::string(tsl::io::Dirname(model_file)); ::tsl::port::InitMain(argv[0], &argc, &argv); return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0db8354f-3004-4343-b70d-579abe43b572
cpp
tensorflow/tensorflow
add_default_attributes
tensorflow/tools/graph_transforms/add_default_attributes.cc
tensorflow/tools/graph_transforms/add_default_attributes_test.cc
#include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status AddDefaultAttributes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::unique_ptr<FunctionLibraryDefinition> flib_def( new FunctionLibraryDefinition(OpRegistry::Global(), input_graph_def.library())); *output_graph_def = input_graph_def; TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(output_graph_def, *flib_def, 0)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("add_default_attributes", AddDefaultAttributes); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status AddDefaultAttributes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class AddDefaultAttributesTest : public ::testing::Test { protected: void TestAddDefaultAttributes() { GraphDef graph_def; NodeDef* lrn_node1 = graph_def.add_node(); lrn_node1->set_name("lrn_node1"); lrn_node1->set_op("LRN"); NodeDef* lrn_node2 = graph_def.add_node(); lrn_node2->set_name("lrn_node2"); lrn_node2->set_op("LRN"); SetNodeAttr("depth_radius", 7, lrn_node2); SetNodeAttr("bias", 2.0f, lrn_node2); SetNodeAttr("alpha", 2.0f, lrn_node2); SetNodeAttr("beta", 1.0f, lrn_node2); GraphDef result; TF_ASSERT_OK(AddDefaultAttributes(graph_def, {}, &result)); std::map<string, const NodeDef*> nodes; MapNamesToNodes(result, &nodes); EXPECT_EQ(5, nodes.at("lrn_node1")->attr().at("depth_radius").i()); EXPECT_NEAR(1.0f, nodes.at("lrn_node1")->attr().at("bias").f(), 1e-5f); EXPECT_NEAR(1.0f, nodes.at("lrn_node1")->attr().at("alpha").f(), 1e-5f); EXPECT_NEAR(0.5f, nodes.at("lrn_node1")->attr().at("beta").f(), 1e-5f); EXPECT_EQ(7, nodes.at("lrn_node2")->attr().at("depth_radius").i()); EXPECT_NEAR(2.0f, nodes.at("lrn_node2")->attr().at("bias").f(), 1e-5f); EXPECT_NEAR(2.0f, nodes.at("lrn_node2")->attr().at("alpha").f(), 1e-5f); EXPECT_NEAR(1.0f, nodes.at("lrn_node2")->attr().at("beta").f(), 1e-5f); } }; TEST_F(AddDefaultAttributesTest, TestAddDefaultAttributes) { TestAddDefaultAttributes(); 
} } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/add_default_attributes.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/add_default_attributes_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9d6bf915-a5e4-4331-9539-03281654d5cd
cpp
tensorflow/tensorflow
set_device
tensorflow/tools/graph_transforms/set_device.cc
tensorflow/tools/graph_transforms/set_device_test.cc
#include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status SetDevice(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { string new_device; TF_RETURN_IF_ERROR(context.GetOneStringParameter("device", "", &new_device)); bool if_default; TF_RETURN_IF_ERROR( context.GetOneBoolParameter("if_default", false, &if_default)); output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; if (!if_default || (node.device().empty())) { new_node->set_device(new_device); } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("set_device", SetDevice); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status SetDevice(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); namespace { GraphDef CreateDeviceGraph() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->set_device("/device:CPU:0"); mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); add_node2->set_device("/device:GPU:1"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); return graph_def; } } TEST(SetDeviceTest, TestSetDevice) { GraphDef graph_def = CreateDeviceGraph(); GraphDef result; 
TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; context.params.insert(std::pair<string, std::vector<string>>( {"device", {string("/device:CPU:0")}})); TF_ASSERT_OK(SetDevice(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ("/device:CPU:0", node_lookup.at("mul_node1")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node2")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node3")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node1")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node2")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node3")->device()); EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node4")->device()); } TEST(SetDeviceTest, TestSetDeviceIfDefault) { GraphDef graph_def = CreateDeviceGraph(); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; context.params.insert(std::pair<string, std::vector<string>>( {"device", {string("/device:GPU:0")}})); context.params.insert( std::pair<string, std::vector<string>>({"if_default", {string("true")}})); TF_ASSERT_OK(SetDevice(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ("/device:CPU:0", node_lookup.at("mul_node1")->device()); EXPECT_EQ("/device:GPU:1", node_lookup.at("add_node2")->device()); EXPECT_EQ("/device:GPU:0", node_lookup.at("add_node3")->device()); EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node1")->device()); EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node2")->device()); EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node3")->device()); EXPECT_EQ("/device:GPU:0", node_lookup.at("add_node4")->device()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/set_device.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/set_device_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
36714099-ba8a-4f52-bf2e-b3079fd2dd83
cpp
tensorflow/tensorflow
strip_unused_nodes
tensorflow/tools/graph_transforms/strip_unused_nodes.cc
tensorflow/tools/graph_transforms/strip_unused_nodes_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { namespace { Status TypeForPlaceholder(const TransformFuncContext& context, const string& node_name, DataType* result) { *result = DT_FLOAT; if (context.params.count("type")) { if (context.params.at("type").size() != 1) { return errors::InvalidArgument( "You must pass no more than one default 'type' to " "strip_unused_nodes"); } const string& type_string = context.params.at("type")[0]; if (!DataTypeFromString(type_string, result)) { return errors::InvalidArgument("Couldn't understand type argument '", type_string, "'"); } } if (context.params.count("name") || context.params.count("type_for_name")) { if (!context.params.count("name") || !context.params.count("type_for_name") || (context.params.at("type_for_name").size() != context.params.at("name").size())) { return errors::InvalidArgument( "You must pass a 'type_for_name' arg for every 'name', e.g. 
" "strip_unused_nodes(name=foo, type_for_name=float, name=bar, " "type_for_name=quint8"); } const int name_count = context.params.at("name").size(); for (int i = 0; i < name_count; ++i) { if (context.params.at("name")[i] == node_name) { const string& type_string = context.params.at("type_for_name")[i]; if (!DataTypeFromString(type_string, result)) { return errors::InvalidArgument("Couldn't understand type argument '", type_string, "'"); } } } } return OkStatus(); } Status ShapeForPlaceholder(const TransformFuncContext& context, const string& node_name, TensorShape* result) { *result = {}; if (context.params.count("shape")) { if (context.params.at("shape").size() != 1) { return errors::InvalidArgument( "You must pass no more than one default 'shape' to " "strip_unused_nodes"); } const string& shape_string = context.params.at("shape")[0]; TF_RETURN_IF_ERROR(TensorShapeFromString(shape_string, result)); } if (context.params.count("name") || context.params.count("shape_for_name")) { if (!context.params.count("name") || !context.params.count("shape_for_name") || (context.params.at("shape_for_name").size() != context.params.at("name").size())) { return errors::InvalidArgument( "You must pass a 'shape_for_name' arg for every 'name', e.g. 
" "strip_unused_nodes(name=foo, shape_for_name=\"2,2,1\", name=bar, " "shape_for_name=\"1\""); } const int name_count = context.params.at("name").size(); for (int i = 0; i < name_count; ++i) { if (context.params.at("name")[i] == node_name) { const string& shape_string = context.params.at("shape_for_name")[i]; TF_RETURN_IF_ERROR(TensorShapeFromString(shape_string, result)); } } } return OkStatus(); } } Status StripUnusedNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::set<string> required_nodes; std::set<string> input_nodes; for (const string& input : context.input_names) { required_nodes.insert(NodeNameFromInput(input)); input_nodes.insert(NodeNameFromInput(input)); } for (const string& output : context.output_names) { required_nodes.insert(output); } std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(input_graph_def, &node_lookup); std::vector<string> current_inputs; current_inputs.reserve(context.output_names.size()); for (const string& output_name : context.output_names) { current_inputs.push_back(NodeNameFromInput(output_name)); } while (!current_inputs.empty()) { std::set<string> next_inputs; for (const string& current_input : current_inputs) { required_nodes.insert(current_input); if (input_nodes.count(current_input)) { continue; } if (!node_lookup.count(current_input)) { return errors::InvalidArgument("Input node ", current_input, " not found in graph"); } const NodeDef* current_node = node_lookup[current_input]; for (const string& input_name : current_node->input()) { string input_node_name = NodeNameFromInput(input_name); if (!required_nodes.count(input_node_name)) { next_inputs.insert(input_node_name); } } } current_inputs = std::vector<string>(next_inputs.begin(), next_inputs.end()); } GraphDef filtered_graph_def; FilterGraphDef(input_graph_def, [&](const NodeDef& node) { return required_nodes.count(node.name()) > 0; }, &filtered_graph_def); output_graph_def->Clear(); for (const 
NodeDef& node : filtered_graph_def.node()) { if (input_nodes.count(node.name())) { NodeDef placeholder_node; if (node.op() == "Placeholder") { placeholder_node = node; } else { placeholder_node.set_op("Placeholder"); placeholder_node.set_name(node.name()); DataType type; TF_RETURN_IF_ERROR(TypeForPlaceholder(context, node.name(), &type)); TensorShape shape; TF_RETURN_IF_ERROR(ShapeForPlaceholder(context, node.name(), &shape)); SetNodeAttr("dtype", type, &placeholder_node); SetNodeAttr("shape", shape, &placeholder_node); } *(output_graph_def->mutable_node()->Add()) = placeholder_node; } else { *(output_graph_def->mutable_node()->Add()) = node; } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("strip_unused_nodes", StripUnusedNodes); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status StripUnusedNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class StripUnusedNodesTest : public ::testing::Test { protected: void TestSimpleAdd() { GraphDef graph_def; NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("a_node"); add_node->add_input("b_node"); NodeDef* a_node = graph_def.add_node(); a_node->set_name("a_node"); a_node->set_op("Const"); NodeDef* b_node = graph_def.add_node(); b_node->set_name("b_node"); b_node->set_op("Const"); NodeDef* c_node = graph_def.add_node(); c_node->set_name("c_node"); c_node->set_op("Const"); GraphDef result; TF_ASSERT_OK(StripUnusedNodes(graph_def, {{}, {"add_node"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ(1, node_lookup.count("a_node")); EXPECT_EQ(1, node_lookup.count("b_node")); EXPECT_EQ(0, node_lookup.count("c_node")); } void TestCommonAncestor() { GraphDef graph_def; NodeDef* add_node1 = graph_def.add_node(); add_node1->set_name("add_node1"); add_node1->set_op("Add"); add_node1->add_input("add_node2"); add_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); 
add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* dangling_input = graph_def.add_node(); dangling_input->set_name("dangling_input"); dangling_input->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef result; TF_ASSERT_OK(StripUnusedNodes( graph_def, {{"dangling_input"}, {"add_node1"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node1")); EXPECT_EQ(1, node_lookup.count("add_node2")); EXPECT_EQ(1, node_lookup.count("add_node3")); EXPECT_EQ(0, node_lookup.count("add_node4")); EXPECT_EQ(1, node_lookup.count("const_node1")); EXPECT_EQ(1, node_lookup.count("const_node2")); EXPECT_EQ(1, node_lookup.count("const_node3")); EXPECT_EQ(0, node_lookup.count("const_node4")); EXPECT_EQ(1, node_lookup.count("dangling_input")); } void TestSimplePlaceholder() { GraphDef graph_def; NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("mul_node"); add_node->add_input("a_node"); NodeDef* mul_node = graph_def.add_node(); mul_node->set_name("mul_node"); mul_node->set_op("Mul"); mul_node->add_input("b_node"); mul_node->add_input("c_node"); NodeDef* a_node = graph_def.add_node(); a_node->set_name("a_node"); a_node->set_op("Const"); NodeDef* b_node = 
graph_def.add_node(); b_node->set_name("b_node"); b_node->set_op("Const"); NodeDef* c_node = graph_def.add_node(); c_node->set_name("c_node"); c_node->set_op("Const"); GraphDef result; TF_ASSERT_OK( StripUnusedNodes(graph_def, {{"mul_node"}, {"add_node"}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ(1, node_lookup.count("mul_node")); EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op()); EXPECT_EQ(DT_FLOAT, node_lookup["mul_node"]->attr().at("dtype").type()); EXPECT_EQ(TensorShape({}), TensorShape(node_lookup["mul_node"]->attr().at("shape").shape())); EXPECT_EQ(1, node_lookup.count("a_node")); EXPECT_EQ(0, node_lookup.count("b_node")); EXPECT_EQ(0, node_lookup.count("c_node")); } void TestPlaceholderDefaultArgs() { GraphDef graph_def; NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("mul_node"); add_node->add_input("a_node"); NodeDef* mul_node = graph_def.add_node(); mul_node->set_name("mul_node"); mul_node->set_op("Mul"); mul_node->add_input("b_node"); mul_node->add_input("c_node"); NodeDef* a_node = graph_def.add_node(); a_node->set_name("a_node"); a_node->set_op("Const"); NodeDef* b_node = graph_def.add_node(); b_node->set_name("b_node"); b_node->set_op("Const"); NodeDef* c_node = graph_def.add_node(); c_node->set_name("c_node"); c_node->set_op("Const"); GraphDef result; TF_ASSERT_OK(StripUnusedNodes(graph_def, {{"mul_node"}, {"add_node"}, {{"type", {"int32"}}, {"shape", {"1,2,3"}}}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ(1, node_lookup.count("mul_node")); EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op()); EXPECT_EQ(DT_INT32, node_lookup["mul_node"]->attr().at("dtype").type()); EXPECT_EQ(TensorShape({1, 2, 3}), 
TensorShape(node_lookup["mul_node"]->attr().at("shape").shape())); EXPECT_EQ(1, node_lookup.count("a_node")); EXPECT_EQ(0, node_lookup.count("b_node")); EXPECT_EQ(0, node_lookup.count("c_node")); } void TestPlaceholderNamedArgs() { GraphDef graph_def; NodeDef* add_node = graph_def.add_node(); add_node->set_name("add_node"); add_node->set_op("Add"); add_node->add_input("mul_node"); add_node->add_input("a_node"); NodeDef* mul_node = graph_def.add_node(); mul_node->set_name("mul_node"); mul_node->set_op("Mul"); mul_node->add_input("b_node"); mul_node->add_input("c_node"); NodeDef* a_node = graph_def.add_node(); a_node->set_name("a_node"); a_node->set_op("Const"); NodeDef* b_node = graph_def.add_node(); b_node->set_name("b_node"); b_node->set_op("Const"); NodeDef* c_node = graph_def.add_node(); c_node->set_name("c_node"); c_node->set_op("Const"); GraphDef result; TF_ASSERT_OK(StripUnusedNodes(graph_def, {{"mul_node", "a_node"}, {"add_node"}, {{"name", {"a_node", "mul_node"}}, {"type_for_name", {"int64", "quint8"}}, {"shape_for_name", {"1,2", "1, 2, 3"}}}}, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("add_node")); EXPECT_EQ(1, node_lookup.count("mul_node")); EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op()); EXPECT_EQ(DT_QUINT8, node_lookup["mul_node"]->attr().at("dtype").type()); EXPECT_EQ(TensorShape({1, 2, 3}), TensorShape(node_lookup["mul_node"]->attr().at("shape").shape())); EXPECT_EQ(1, node_lookup.count("a_node")); EXPECT_EQ("Placeholder", node_lookup["a_node"]->op()); EXPECT_EQ(DT_INT64, node_lookup["a_node"]->attr().at("dtype").type()); EXPECT_EQ(TensorShape({1, 2}), TensorShape(node_lookup["a_node"]->attr().at("shape").shape())); EXPECT_EQ(0, node_lookup.count("b_node")); EXPECT_EQ(0, node_lookup.count("c_node")); } }; TEST_F(StripUnusedNodesTest, TestSimpleAdd) { TestSimpleAdd(); } TEST_F(StripUnusedNodesTest, TestCommonAncestor) { TestCommonAncestor(); } 
TEST_F(StripUnusedNodesTest, TestSimplePlaceholder) { TestSimplePlaceholder(); } TEST_F(StripUnusedNodesTest, TestPlaceholderDefaultArgs) { TestPlaceholderDefaultArgs(); } TEST_F(StripUnusedNodesTest, TestPlaceholderNamedArgs) { TestPlaceholderNamedArgs(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/strip_unused_nodes.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/strip_unused_nodes_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
54bb0901-967f-49c3-8af7-a3fac6672659
cpp
tensorflow/tensorflow
sparsify_gather
tensorflow/tools/graph_transforms/sparsify_gather.cc
tensorflow/tools/graph_transforms/sparsify_gather_test.cc
#include <cmath> #include <memory> #include <unordered_map> #include "tensorflow/c/checkpoint_reader.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { using str_util::Split; using str_util::StringReplace; using strings::StrCat; namespace graph_transforms { Status SparsifyWeights(const Tensor& tensor, Tensor* indices_tensor, Tensor* values_tensor) { if (tensor.dims() != 2 || tensor.dim_size(1) != 1) { return tensorflow::errors::FailedPrecondition( "Transform only applicable to subgraph with 'Const' with " "tensor of shape [N, 1]. But instead get shape ", tensor.shape().DebugString(), "."); } auto flat = tensor.flat<float>(); std::vector<int64_t> indices; std::vector<float> values; for (int64_t i = 0; i < flat.size(); i++) { float val = flat(i); if (std::abs(val) >= 1.0e-5) { indices.push_back(i); values.push_back(val); } } if (indices.empty() || values.empty()) { indices.push_back(0); values.push_back(0); } *indices_tensor = Tensor(DataTypeToEnum<int64_t>::value, {static_cast<int64_t>(indices.size())}); std::copy_n(indices.begin(), indices.size(), indices_tensor->flat<int64_t>().data()); *values_tensor = Tensor(DataTypeToEnum<float>::value, {static_cast<int64_t>(values.size())}); std::copy_n(values.begin(), values.size(), values_tensor->flat<float>().data()); return OkStatus(); } void CreateConstNode(const Tensor& tensor, const string& name, NodeDef* node_def) { node_def->set_op("Const"); node_def->set_name(name); SetNodeTensorAttr<float>("value", tensor, node_def); } string GetMonolithicTensorKey(const string& tensor_slice_name) { 
std::vector<string> names = Split(tensor_slice_name, "/"); if (absl::StartsWith(names[names.size() - 1], "part_")) { CHECK_GE(names.size(), 2); names.pop_back(); } return absl::StrJoin(names, "/"); } Status ObtainTensorSlice(const GraphDef& input_graph_def, const string& target_name, string* shape_slice_string) { string restore_node_name; for (const auto& node : input_graph_def.node()) { std::vector<string> node_name_parts = Split(node.name(), "/"); if (node_name_parts.size() == 2 && absl::StartsWith(node_name_parts[0], "save") && absl::StartsWith(node_name_parts[1], "Assign") && node.input(0) == target_name) { restore_node_name = node.input(1); break; } } std::vector<string> restore_node_parts = Split(restore_node_name, ":"); CHECK_LE(restore_node_parts.size(), 2); string tensor_names_node; string shape_and_slices_node; for (const auto& node : input_graph_def.node()) { if ((node.name() == restore_node_parts[0]) && (node.op() == "RestoreV2")) { tensor_names_node = node.input(1); shape_and_slices_node = node.input(2); break; } } int offset = -1; for (const auto& node : input_graph_def.node()) { if (node.name() == tensor_names_node) { Tensor tensor_names_tensor; TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor_names_tensor)); const auto& tensor_names_value = tensor_names_tensor.flat<tstring>(); for (int i = 0; i < tensor_names_value.size(); i++) { if (tensor_names_value(i) == GetMonolithicTensorKey(target_name)) { offset = i; break; } } } } if (offset == -1) { return errors::Internal("Unable to find RestoreV2 entry for variable: ", target_name); } for (const auto& node : input_graph_def.node()) { if (node.name() == shape_and_slices_node) { Tensor shape_and_slices_tensor; TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &shape_and_slices_tensor)); const auto& shape_and_slices_value = shape_and_slices_tensor.flat<tstring>(); *shape_slice_string = shape_and_slices_value(offset); return OkStatus(); } } return errors::Internal("Unable to find slice for variable: ", 
target_name); } Status ReadTensorFromCheckpoint( const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader, const string& shape_and_slice, Tensor* tensor) { if (ckpt_reader) { TensorShape parsed_full_shape; TensorSlice parsed_slice; TensorShape parsed_slice_shape; bool get_slice = false; if (!shape_and_slice.empty()) { TF_RETURN_IF_ERROR( checkpoint::ParseShapeAndSlice(shape_and_slice, &parsed_full_shape, &parsed_slice, &parsed_slice_shape)); get_slice = (parsed_full_shape != parsed_slice_shape); } if (get_slice) { TF_RETURN_IF_ERROR(ckpt_reader->LookupSlice( GetMonolithicTensorKey(tensor_name), parsed_slice, tensor)); } else { TF_RETURN_IF_ERROR( ckpt_reader->Lookup(GetMonolithicTensorKey(tensor_name), tensor)); } return OkStatus(); } return errors::Internal("Checkpoint reader was not initialized. "); } Status InitializeCheckpointReader(const TransformFuncContext& context, std::unique_ptr<BundleReader>* ckpt_reader) { if (context.params.count("input_checkpoint")) { const string input_checkpoint = context.params.at("input_checkpoint")[0]; ckpt_reader->reset(new BundleReader(Env::Default(), input_checkpoint)); TF_RETURN_IF_ERROR((*ckpt_reader)->status()); } return OkStatus(); } Status ObtainVariableInfo( const GraphDef& input_graph_def, std::unique_ptr<std::unordered_map<string, string> >* shapes_and_slices) { shapes_and_slices->reset(new std::unordered_map<string, string>()); for (const auto& node : input_graph_def.node()) { if ((node.op() == "Variable") || (node.op() == "VariableV2")) { string s; TF_RETURN_IF_ERROR(ObtainTensorSlice(input_graph_def, node.name(), &s)); (**shapes_and_slices)[node.name()] = s; } } return OkStatus(); } Status RemoveInputAtIndex(NodeDef* n, int index) { for (int i = index; i < n->input_size() - 1; i++) { n->mutable_input()->SwapElements(i, i + 1); } n->mutable_input()->RemoveLast(); return OkStatus(); } Status RemoveNodeAtIndex(GraphDef* g, int index) { for (int i = index; i < g->node_size() - 1; i++) { 
g->mutable_node()->SwapElements(i, i + 1); } g->mutable_node()->RemoveLast(); return OkStatus(); } Status SparsifyGatherInternal( const GraphDef& input_graph_def, const std::unique_ptr<std::unordered_map<string, string> >& shapes_and_slices, const TransformFuncContext& context, const OpTypePattern& pattern, const std::unique_ptr<BundleReader>& ckpt_reader, GraphDef* output_graph_def) { string group_init_node = "group_deps"; if (context.params.count("group_init_node")) { group_init_node = context.params.at("group_init_node")[0]; } GraphDef current_graph_def = input_graph_def; bool any_match_found = false; std::unordered_map<string, int> refs; for (const auto& node : current_graph_def.node()) { for (const auto& input : node.input()) { auto parsed_input = StringReplace(input, "^", "", true); refs[parsed_input] += 1; } } do { any_match_found = false; GraphDef replaced_graph_def = current_graph_def; std::vector<string> init_table_node_names; std::vector<string> removed_node_names; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, pattern, [&ckpt_reader, &any_match_found, &init_table_node_names, &shapes_and_slices, &removed_node_names, &refs](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { any_match_found = true; const NodeDef& gather_node = match.node; if (gather_node.op() == "GatherV2") { const NodeDef& axis_node = match.inputs[2].node; Tensor axis_t; TF_RETURN_IF_ERROR(GetNodeAttr(axis_node, "value", &axis_t)); int64_t axis = 0; if (axis_t.dtype() == DT_INT32) { axis = axis_t.scalar<int32>()(); } else if (axis_t.dtype() == DT_INT64) { axis = axis_t.scalar<int64_t>()(); } else { return tensorflow::errors::FailedPrecondition( "Gather axis was not int32 or int64."); } if (axis != 0) { return tensorflow::errors::FailedPrecondition( "Transform only applicable to subgraph with GatherV2 over " "axis 0. 
Found axis ", axis, "."); } } const NodeDef& weights_node = match.inputs[0].inputs[0].node; DataType data_type; TF_RETURN_IF_ERROR(GetNodeAttr(weights_node, "dtype", &data_type)); if (data_type != DT_FLOAT) { return tensorflow::errors::FailedPrecondition( "Transform only applicable to subgraph with 'Const'," "'Variable', or 'VariableV2' of dtype " "'DT_FLOAT'. Found '" + weights_node.op() + "' with name '", weights_node.name(), "' and dtype '", data_type, "'."); } Tensor weight; if (weights_node.op() == "Const") { weight = GetNodeTensorAttr(weights_node, "value"); } else { TF_RETURN_IF_ERROR(ReadTensorFromCheckpoint( weights_node.name(), ckpt_reader, (*shapes_and_slices)[weights_node.name()], &weight)); } removed_node_names.push_back(weights_node.name()); removed_node_names.push_back(match.inputs[0].node.name()); for (auto input_node : match.inputs[0].node.input()) { auto parsed_input = StringReplace(input_node, "^", "", true); refs[parsed_input]--; } Tensor indices_tensor; Tensor values_tensor; TF_RETURN_IF_ERROR( SparsifyWeights(weight, &indices_tensor, &values_tensor)); DataType key_dtype = DT_INT64; NodeDef indices_node; CreateConstNode(indices_tensor, StrCat(weights_node.name(), "/indices"), &indices_node); SetNodeAttr("dtype", key_dtype, &indices_node); NodeDef values_node; CreateConstNode(values_tensor, StrCat(weights_node.name(), "/values"), &values_node); SetNodeAttr("dtype", data_type, &values_node); NodeDef hashtable_node; hashtable_node.set_op("HashTable"); hashtable_node.set_name(StrCat(weights_node.name(), "/HashTable")); SetNodeAttr("key_dtype", key_dtype, &hashtable_node); SetNodeAttr("value_dtype", data_type, &hashtable_node); NodeDef init_table_node; init_table_node.set_op("InitializeTable"); init_table_node.set_name( StrCat(weights_node.name(), "/InitializeTable")); SetNodeAttr("Tkey", key_dtype, &init_table_node); SetNodeAttr("Tval", data_type, &init_table_node); init_table_node_names.push_back(init_table_node.name()); NodeDef lookup_node; 
lookup_node.set_op("LookupTableFind"); lookup_node.set_name(StrCat(gather_node.name(), "/LookupTableFind")); SetNodeAttr("Tin", key_dtype, &lookup_node); SetNodeAttr("Tout", data_type, &lookup_node); Tensor zero_tensor(data_type, TensorShape({})); zero_tensor.flat<float>()(0) = 0.0; NodeDef default_value_node; CreateConstNode(zero_tensor, StrCat(gather_node.name(), "/Const"), &default_value_node); SetNodeAttr("dtype", data_type, &default_value_node); Tensor dim_idx(DT_INT32, TensorShape({})); dim_idx.flat<int32>()(0) = -1; NodeDef dim_idx_node; dim_idx_node.set_op("Const"); dim_idx_node.set_name( StrCat(gather_node.name(), "/ExpandDims/Const")); SetNodeAttr("value", dim_idx, &dim_idx_node); SetNodeAttr("dtype", DT_INT32, &dim_idx_node); NodeDef expand_dims_node; expand_dims_node.set_op("ExpandDims"); expand_dims_node.set_name(gather_node.name()); SetNodeAttr("T", data_type, &expand_dims_node); AddNodeInput(hashtable_node.name(), &init_table_node); refs[hashtable_node.name()]++; AddNodeInput(indices_node.name(), &init_table_node); refs[indices_node.name()]++; AddNodeInput(values_node.name(), &init_table_node); refs[values_node.name()]++; AddNodeInput(hashtable_node.name(), &lookup_node); refs[hashtable_node.name()]++; AddNodeInput(gather_node.input(1), &lookup_node); refs[gather_node.input(1)]++; AddNodeInput(default_value_node.name(), &lookup_node); refs[default_value_node.name()]++; AddNodeInput(lookup_node.name(), &expand_dims_node); refs[lookup_node.name()]++; AddNodeInput(dim_idx_node.name(), &expand_dims_node); refs[dim_idx_node.name()]++; new_nodes->push_back(match.inputs[1].node); new_nodes->push_back(indices_node); new_nodes->push_back(values_node); new_nodes->push_back(hashtable_node); new_nodes->push_back(init_table_node); new_nodes->push_back(lookup_node); new_nodes->push_back(default_value_node); new_nodes->push_back(dim_idx_node); new_nodes->push_back(expand_dims_node); return OkStatus(); }, {true}, &replaced_graph_def)); NodeDef* init_op = nullptr; 
for (int i = 0; i < replaced_graph_def.node_size(); i++) { if (replaced_graph_def.node(i).name() == group_init_node && replaced_graph_def.node(i).op() == "NoOp") { init_op = replaced_graph_def.mutable_node(i); break; } } if (!init_op) { init_op = replaced_graph_def.mutable_node()->Add(); init_op->set_op("NoOp"); init_op->set_name(group_init_node); } for (const string& name : init_table_node_names) { AddNodeInput(StrCat("^", name), init_op); refs[name]++; } for (const auto& output : context.output_names) { refs.erase(output); } for (const auto& input : context.input_names) { refs.erase(input); } for (const auto& entry : refs) { if (entry.second == 0) { removed_node_names.push_back(entry.first); } } while (!removed_node_names.empty()) { auto name = removed_node_names.back(); removed_node_names.pop_back(); int i = 0; while (i < replaced_graph_def.node_size()) { if ((replaced_graph_def.node(i).name() == name) && (replaced_graph_def.node(i).op() != "RestoreV2")) { for (const auto& input : replaced_graph_def.node(i).input()) { auto parsed_input = StringReplace(input, "^", "", true); refs[parsed_input] -= 1; if (refs[parsed_input] == 0) { removed_node_names.push_back(parsed_input); } } TF_RETURN_IF_ERROR(RemoveNodeAtIndex(&replaced_graph_def, i)); continue; } int j = 0; bool deleted_inputs = false; while (j < replaced_graph_def.node(i).input_size()) { if (replaced_graph_def.node(i).input(j) == name || replaced_graph_def.node(i).input(j) == ("^" + name)) { TF_RETURN_IF_ERROR( RemoveInputAtIndex(replaced_graph_def.mutable_node(i), j)); deleted_inputs = true; continue; } j++; } if (deleted_inputs) { if (replaced_graph_def.node(i).op() == "ConcatV2") { if (replaced_graph_def.node(i).input_size() > 2) { SetNodeAttr("N", replaced_graph_def.node(i).input_size() - 1, replaced_graph_def.mutable_node(i)); } else if (replaced_graph_def.node(i).input_size() == 2) { if (refs[replaced_graph_def.node(i).input(1)] != 1) { return errors::Internal( "Expect axis tensor of ConcatV2 node to 
only be referenced " "once."); } refs[replaced_graph_def.node(i).input(1)] -= 1; removed_node_names.push_back(replaced_graph_def.node(i).input(1)); replaced_graph_def.mutable_node(i)->mutable_input()->RemoveLast(); replaced_graph_def.mutable_node(i)->mutable_attr()->erase("N"); replaced_graph_def.mutable_node(i)->set_op("Identity"); } else { return errors::Internal( "ConcatV2 should have at least two elements"); } } if ((replaced_graph_def.node(i).op() == "Assign" || replaced_graph_def.node(i).op() == "Reshape" || replaced_graph_def.node(i).op() == "Equal" || replaced_graph_def.node(i).op() == "Mean" || replaced_graph_def.node(i).op() == "ScalarSummary") && replaced_graph_def.node(i).input_size() == 1) { removed_node_names.push_back(replaced_graph_def.node(i).name()); } if (!replaced_graph_def.node(i).input_size()) { removed_node_names.push_back(replaced_graph_def.node(i).name()); } } i++; } } current_graph_def = replaced_graph_def; } while (any_match_found); *output_graph_def = current_graph_def; return OkStatus(); } Status SparsifyGather(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { const OpTypePattern gather_pattern = {"Gather", { {"Identity", { {"Const|Variable|VariableV2"} } }, {"*"}, } }; const OpTypePattern gather_v2_pattern = {"GatherV2", { {"Identity", { {"Const|Variable|VariableV2"} } }, {"*"}, {"Const"}, } }; GraphDef cleaned_input_graph_def; RemoveAttributes(input_graph_def, {"_output_shapes"}, &cleaned_input_graph_def); GraphDef temp_output; std::unique_ptr<BundleReader> ckpt_reader; TF_RETURN_IF_ERROR(InitializeCheckpointReader(context, &ckpt_reader)); std::unique_ptr<std::unordered_map<string, string> > shapes_and_slices; TF_RETURN_IF_ERROR( ObtainVariableInfo(cleaned_input_graph_def, &shapes_and_slices)); TF_RETURN_IF_ERROR(SparsifyGatherInternal( cleaned_input_graph_def, shapes_and_slices, context, gather_pattern, ckpt_reader, &temp_output)); 
TF_RETURN_IF_ERROR(SparsifyGatherInternal(temp_output, shapes_and_slices, context, gather_v2_pattern, ckpt_reader, output_graph_def)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("sparsify_gather", SparsifyGather); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status SparsifyGather(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status ReadTensorFromCheckpoint( const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader, const string& shape_and_slice, Tensor* tensor); class SparsifyGatherTest : public ::testing::Test { protected: NodeDef* CreateNode(const StringPiece name, const StringPiece op, const std::vector<NodeDef*>& inputs, GraphDef* graph_def, bool control_dep = false) { NodeDef* node_def = graph_def->add_node(); node_def->set_name(string(name)); node_def->set_op(string(op)); if (!control_dep) { std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) { node_def->add_input(input->name()); }); } else { std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) { node_def->add_input(strings::StrCat("^", input->name())); }); } return node_def; } void MakeGather(StringPiece name, bool gather_v2, NodeDef* params, NodeDef* indices, GraphDef* graph_def) { if (gather_v2) { NodeDef* axis_node = CreateNode(strings::StrCat(name, "_axis"), "Const", {}, graph_def); Tensor axis_t(DT_INT32, TensorShape({})); axis_t.scalar<int32>()() = 0; SetNodeTensorAttr<int32>("value", axis_t, axis_node); CreateNode(name, "GatherV2", {params, indices, axis_node}, graph_def); } else { CreateNode(name, "Gather", 
{params, indices}, graph_def); } } void TestSinglePartition(bool gather_v2, bool include_shared_init, bool test_variable, bool test_kept_concat, const string& shared_init_name = "group_deps") { GraphDef graph_def; const auto checkpoint_path = io::JoinPath(testing::TmpDir(), "checkpoint_single"); NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def); NodeDef* w_node; NodeDef* zeros_const; NodeDef* zeros_shape; NodeDef* zeros_node; NodeDef* assign_node; Tensor weights(DT_FLOAT, TensorShape({4, 1})); test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001}); if (!test_variable) { w_node = CreateNode("w/part_1", "Const", {}, &graph_def); SetNodeTensorAttr<float>("value", weights, w_node); } else { w_node = CreateNode("w/part_1", "VariableV2", {}, &graph_def); zeros_shape = CreateNode("w/part_1/Initializer/zeros/shape_as_tensor", "Const", {}, &graph_def); zeros_const = CreateNode("w/part_1/Initializer/zeros/Const", "Const", {}, &graph_def); zeros_node = CreateNode("w/part_1/Initializer/zeros", "Fill", {zeros_shape, zeros_const}, &graph_def); assign_node = CreateNode("w/part_1/Assign", "Assign", {w_node, zeros_node}, &graph_def); NodeDef* save_const_node = CreateNode("save/Const", "Const", {}, &graph_def); Tensor tensor_names_values(DT_STRING, TensorShape({1})); test::FillValues<tstring>(&tensor_names_values, {"w"}); NodeDef* tensor_names_node = CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def); SetNodeTensorAttr<string>("value", tensor_names_values, tensor_names_node); NodeDef* tensor_shapes_slices_node = CreateNode( "save/RestoreV2/shape_and_slices", "Const", {}, &graph_def); Tensor shapes_slices_val(DT_STRING, TensorShape({1})); shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1"; SetNodeTensorAttr<string>("value", shapes_slices_val, tensor_shapes_slices_node); NodeDef* restore_node = CreateNode( "save/RestoreV2", "RestoreV2", {save_const_node, tensor_names_node, tensor_shapes_slices_node}, &graph_def); CreateNode("save/Assign", 
"Assign", {w_node, restore_node}, &graph_def); BundleWriter writer(Env::Default(), checkpoint_path); TF_ASSERT_OK(writer.Add("w", weights)); TF_ASSERT_OK(writer.Finish()); } SetNodeAttr("dtype", DT_FLOAT, w_node); NodeDef* identity_node = CreateNode("w/read", "Identity", {w_node}, &graph_def); MakeGather("gather", gather_v2, identity_node, input_node, &graph_def); if (include_shared_init) { if (!test_variable) { CreateNode(shared_init_name, "NoOp", {}, &graph_def); } else { CreateNode(shared_init_name, "NoOp", {assign_node}, &graph_def, true); } } NodeDef* concat_axis_node = CreateNode("linear/concat/axis", "Const", {}, &graph_def); NodeDef* concat_input_node = CreateNode("concat/input/node", "Const", {}, &graph_def); NodeDef* concat_node = nullptr; if (!test_kept_concat) { concat_node = CreateNode( "concat/node", "ConcatV2", {identity_node, concat_input_node, concat_axis_node}, &graph_def); SetNodeAttr("N", 2, concat_node); } else { NodeDef* concat_input_node_2 = CreateNode("concat/input/node_2", "Const", {}, &graph_def); concat_node = CreateNode("concat/node", "ConcatV2", {identity_node, concat_input_node, concat_input_node_2, concat_axis_node}, &graph_def); SetNodeAttr("N", 3, concat_node); } GraphDef result; TransformFuncContext context; context.input_names = {"ids"}; context.output_names = {"gather"}; if (test_variable) { context.params["input_checkpoint"] = {checkpoint_path}; } if (shared_init_name != "group_deps") { context.params["group_init_node"] = {shared_init_name}; } TF_ASSERT_OK(SparsifyGather(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros/shape_as_tensor")); EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros/Const")); EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros")); EXPECT_EQ(0, node_lookup.count("w/part_1/Assign")); EXPECT_EQ(1, node_lookup.count("ids")); EXPECT_EQ("Const", 
node_lookup.at("ids")->op()); EXPECT_EQ(1, node_lookup.count("concat/node")); if (!test_kept_concat) { EXPECT_EQ(0, node_lookup.count("linear/concat/axis")); EXPECT_EQ("Identity", node_lookup.at("concat/node")->op()); EXPECT_EQ(1, node_lookup.at("concat/node")->input_size()); EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0)); } else { EXPECT_EQ(1, node_lookup.count("linear/concat/axis")); EXPECT_EQ("ConcatV2", node_lookup.at("concat/node")->op()); EXPECT_EQ(3, node_lookup.at("concat/node")->input_size()); EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0)); EXPECT_EQ("concat/input/node_2", node_lookup.at("concat/node")->input(1)); EXPECT_EQ("linear/concat/axis", node_lookup.at("concat/node")->input(2)); EXPECT_EQ(2, node_lookup.at("concat/node")->attr().at("N").i()); } EXPECT_EQ(1, node_lookup.count("w/part_1/indices")); EXPECT_EQ("Const", node_lookup.at("w/part_1/indices")->op()); Tensor expected_indices_tensor(DT_INT64, TensorShape({3})); test::FillValues<int64_t>(&expected_indices_tensor, {0, 2, 3}); test::ExpectTensorEqual<int64_t>( expected_indices_tensor, GetNodeTensorAttr(*(node_lookup.at("w/part_1/indices")), "value")); EXPECT_EQ(1, node_lookup.count("w/part_1/values")); EXPECT_EQ("Const", node_lookup.at("w/part_1/values")->op()); Tensor expected_values_tensor(DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected_values_tensor, {0.2, 1.2, 0.001}); test::ExpectTensorNear<float>( expected_values_tensor, GetNodeTensorAttr(*(node_lookup.at("w/part_1/values")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("w/part_1/HashTable")); EXPECT_EQ("HashTable", node_lookup.at("w/part_1/HashTable")->op()); EXPECT_EQ(1, node_lookup.count("w/part_1/InitializeTable")); EXPECT_EQ("InitializeTable", node_lookup.at("w/part_1/InitializeTable")->op()); EXPECT_EQ(1, node_lookup.count("gather/LookupTableFind")); EXPECT_EQ("LookupTableFind", node_lookup.at("gather/LookupTableFind")->op()); EXPECT_EQ(1, 
node_lookup.count("gather/Const")); EXPECT_EQ("Const", node_lookup.at("gather/Const")->op()); Tensor expected_gather_default_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_gather_default_tensor, {0.0}); test::ExpectTensorNear<float>( expected_gather_default_tensor, GetNodeTensorAttr(*(node_lookup.at("gather/Const")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("gather/ExpandDims/Const")); EXPECT_EQ("Const", node_lookup.at("gather/ExpandDims/Const")->op()); Tensor expected_expand_dims_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&expected_expand_dims_tensor, {-1}); test::ExpectTensorEqual<int32>( expected_expand_dims_tensor, GetNodeTensorAttr(*(node_lookup.at("gather/ExpandDims/Const")), "value")); EXPECT_EQ(1, node_lookup.count("gather")); EXPECT_EQ("ExpandDims", node_lookup.at("gather")->op()); EXPECT_EQ(1, node_lookup.count(shared_init_name)); EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op()); EXPECT_EQ("w/part_1/HashTable", node_lookup.at("w/part_1/InitializeTable")->input(0)); EXPECT_EQ("w/part_1/indices", node_lookup.at("w/part_1/InitializeTable")->input(1)); EXPECT_EQ("w/part_1/values", node_lookup.at("w/part_1/InitializeTable")->input(2)); EXPECT_EQ("w/part_1/HashTable", node_lookup.at("gather/LookupTableFind")->input(0)); EXPECT_EQ("ids", node_lookup.at("gather/LookupTableFind")->input(1)); EXPECT_EQ("gather/Const", node_lookup.at("gather/LookupTableFind")->input(2)); EXPECT_EQ("gather/LookupTableFind", node_lookup.at("gather")->input(0)); EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(), node_lookup.at(shared_init_name)->input().end(), "^w/part_1/InitializeTable"), node_lookup.at(shared_init_name)->input().end()); EXPECT_EQ(1, node_lookup.at(shared_init_name)->input().size()); } void TestMultiPartition(bool gather_v2, bool include_shared_init, bool test_variable, const string& shared_init_name = "group_deps") { GraphDef graph_def; const auto checkpoint_path = 
io::JoinPath(testing::TmpDir(), "checkpoint_multiple"); NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def); NodeDef* w_node1; NodeDef* w_node2; NodeDef* zeros_const1; NodeDef* zeros_shape1; NodeDef* zeros_node1; NodeDef* zeros_const2; NodeDef* zeros_shape2; NodeDef* zeros_node2; NodeDef* assign_node1; NodeDef* assign_node2; Tensor weights(DT_FLOAT, TensorShape({4, 1})); test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001}); if (!test_variable) { w_node1 = CreateNode("w1/part_1", "Const", {}, &graph_def); w_node2 = CreateNode("w2/part_1", "Const", {}, &graph_def); SetNodeTensorAttr<float>("value", weights, w_node1); SetNodeTensorAttr<float>("value", weights, w_node2); } else { NodeDef* save_const_node = CreateNode("save/Const", "Const", {}, &graph_def); NodeDef* tensor_names_node = CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def); Tensor tensor_names_values(DT_STRING, TensorShape({2})); test::FillValues<tstring>(&tensor_names_values, {"w1", "w2"}); SetNodeTensorAttr<string>("value", tensor_names_values, tensor_names_node); NodeDef* tensor_shapes_slices_node = CreateNode( "save/RestoreV2/shape_and_slices", "Const", {}, &graph_def); Tensor shapes_slices_val(DT_STRING, TensorShape({2})); shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1"; shapes_slices_val.flat<tstring>()(1) = "4 1 0,4:0,1"; SetNodeTensorAttr<string>("value", shapes_slices_val, tensor_shapes_slices_node); NodeDef* restore_node = CreateNode( "save/RestoreV2", "RestoreV2", {save_const_node, tensor_names_node, tensor_shapes_slices_node}, &graph_def); w_node1 = CreateNode("w1/part_1", "VariableV2", {}, &graph_def); zeros_shape1 = CreateNode("w1/part_1/Initializer/zeros/shape_as_tensor", "Const", {}, &graph_def); zeros_const1 = CreateNode("w1/part_1/Initializer/zeros/Const", "Const", {}, &graph_def); zeros_node1 = CreateNode("w1/part_1/Initializer/zeros", "Fill", {zeros_shape1, zeros_const1}, &graph_def); assign_node1 = CreateNode("w1/part_1/Assign", "Assign", 
{w_node1, zeros_node1}, &graph_def); CreateNode("save/Assign", "Assign", {w_node1, restore_node}, &graph_def); w_node2 = CreateNode("w2/part_1", "VariableV2", {}, &graph_def); zeros_shape2 = CreateNode("w2/part_1/Initializer/zeros/shape_as_tensor", "Const", {}, &graph_def); zeros_const2 = CreateNode("w2/part_1/Initializer/zeros/Const", "Const", {}, &graph_def); zeros_node2 = CreateNode("w2/part_1/Initializer/zeros", "Fill", {zeros_shape2, zeros_const2}, &graph_def); assign_node2 = CreateNode("w2/part_1/Assign", "Assign", {w_node2, zeros_node2}, &graph_def); CreateNode("save/Assign_1", "Assign", {w_node2, restore_node}, &graph_def); BundleWriter writer(Env::Default(), checkpoint_path); TF_ASSERT_OK(writer.Add("w1", weights)); TF_ASSERT_OK(writer.Add("w2", weights)); TF_ASSERT_OK(writer.Finish()); } SetNodeAttr("dtype", DT_FLOAT, w_node1); SetNodeAttr("dtype", DT_FLOAT, w_node2); NodeDef* identity_node1 = CreateNode("w1/part_1/read", "Identity", {w_node1}, &graph_def); NodeDef* identity_node2 = CreateNode("w2/part_1/read", "Identity", {w_node2}, &graph_def); MakeGather("gather1", gather_v2, identity_node1, input_node, &graph_def); MakeGather("gather2", gather_v2, identity_node2, input_node, &graph_def); NodeDef* concat_axis_node = CreateNode("linear/concat/axis", "Const", {}, &graph_def); NodeDef* concat_node = CreateNode( "concat/node", "ConcatV2", {identity_node1, identity_node2, concat_axis_node}, &graph_def); SetNodeAttr("N", 2, concat_node); if (include_shared_init) { if (!test_variable) { CreateNode(shared_init_name, "NoOp", {}, &graph_def); } else { CreateNode(shared_init_name, "NoOp", {assign_node1, assign_node2}, &graph_def, true); } } GraphDef result; TransformFuncContext context; context.input_names = {"ids"}; context.output_names = {"gather1", "gather2"}; if (test_variable) { context.params["input_checkpoint"] = {checkpoint_path}; } if (shared_init_name != "group_deps") { context.params["group_init_node"] = {shared_init_name}; } 
TF_ASSERT_OK(SparsifyGather(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros/shape_as_tensor")); EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros/Const")); EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros")); EXPECT_EQ(0, node_lookup.count("w1/part_1/Assign")); EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros/shape_as_tensor")); EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros/Const")); EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros")); EXPECT_EQ(0, node_lookup.count("w2/part_1/Assign")); EXPECT_EQ(1, node_lookup.count("ids")); EXPECT_EQ("Const", node_lookup.at("ids")->op()); EXPECT_EQ(1, node_lookup.count(shared_init_name)); EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op()); EXPECT_EQ(1, node_lookup.count("w1/part_1/indices")); EXPECT_EQ("Const", node_lookup.at("w1/part_1/indices")->op()); Tensor expected_indices_tensor1(DT_INT64, TensorShape({3})); test::FillValues<int64_t>(&expected_indices_tensor1, {0, 2, 3}); test::ExpectTensorEqual<int64_t>( expected_indices_tensor1, GetNodeTensorAttr(*(node_lookup.at("w1/part_1/indices")), "value")); EXPECT_EQ(1, node_lookup.count("w1/part_1/values")); EXPECT_EQ("Const", node_lookup.at("w1/part_1/values")->op()); Tensor expected_values_tensor1(DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected_values_tensor1, {0.2, 1.2, 0.001}); test::ExpectTensorNear<float>( expected_values_tensor1, GetNodeTensorAttr(*(node_lookup.at("w1/part_1/values")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("w1/part_1/HashTable")); EXPECT_EQ("HashTable", node_lookup.at("w1/part_1/HashTable")->op()); EXPECT_EQ(1, node_lookup.count("w1/part_1/InitializeTable")); EXPECT_EQ("InitializeTable", node_lookup.at("w1/part_1/InitializeTable")->op()); EXPECT_EQ(1, node_lookup.count("gather1/LookupTableFind")); EXPECT_EQ("LookupTableFind", 
node_lookup.at("gather1/LookupTableFind")->op()); EXPECT_EQ(1, node_lookup.count("gather1/Const")); EXPECT_EQ("Const", node_lookup.at("gather1/Const")->op()); Tensor expected_gather_default_tensor1(DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_gather_default_tensor1, {0.0}); test::ExpectTensorNear<float>( expected_gather_default_tensor1, GetNodeTensorAttr(*(node_lookup.at("gather1/Const")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("gather1/ExpandDims/Const")); EXPECT_EQ("Const", node_lookup.at("gather1/ExpandDims/Const")->op()); Tensor expected_expand_dims_tensor1(DT_INT32, TensorShape({})); test::FillValues<int32>(&expected_expand_dims_tensor1, {-1}); test::ExpectTensorEqual<int32>( expected_expand_dims_tensor1, GetNodeTensorAttr(*(node_lookup.at("gather1/ExpandDims/Const")), "value")); EXPECT_EQ(1, node_lookup.count("gather1")); EXPECT_EQ("ExpandDims", node_lookup.at("gather1")->op()); EXPECT_EQ(1, node_lookup.count("w2/part_1/indices")); EXPECT_EQ("Const", node_lookup.at("w2/part_1/indices")->op()); Tensor expected_indices_tensor2(DT_INT64, TensorShape({3})); test::FillValues<int64_t>(&expected_indices_tensor2, {0, 2, 3}); test::ExpectTensorEqual<int64_t>( expected_indices_tensor2, GetNodeTensorAttr(*(node_lookup.at("w2/part_1/indices")), "value")); EXPECT_EQ(1, node_lookup.count("w2/part_1/values")); EXPECT_EQ("Const", node_lookup.at("w2/part_1/values")->op()); Tensor expected_values_tensor2(DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected_values_tensor2, {0.2, 1.2, 0.001}); test::ExpectTensorNear<float>( expected_values_tensor2, GetNodeTensorAttr(*(node_lookup.at("w2/part_1/values")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("w2/part_1/HashTable")); EXPECT_EQ("HashTable", node_lookup.at("w2/part_1/HashTable")->op()); EXPECT_EQ(1, node_lookup.count("w2/part_1/InitializeTable")); EXPECT_EQ("InitializeTable", node_lookup.at("w2/part_1/InitializeTable")->op()); EXPECT_EQ(1, node_lookup.count("gather2/LookupTableFind")); 
EXPECT_EQ("LookupTableFind", node_lookup.at("gather2/LookupTableFind")->op()); EXPECT_EQ(1, node_lookup.count("gather2/Const")); EXPECT_EQ("Const", node_lookup.at("gather2/Const")->op()); Tensor expected_gather_default_tensor2(DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_gather_default_tensor2, {0.0}); test::ExpectTensorNear<float>( expected_gather_default_tensor2, GetNodeTensorAttr(*(node_lookup.at("gather2/Const")), "value"), 1e-5); EXPECT_EQ(1, node_lookup.count("gather2/ExpandDims/Const")); EXPECT_EQ("Const", node_lookup.at("gather2/ExpandDims/Const")->op()); Tensor expected_expand_dims_tensor2(DT_INT32, TensorShape({})); test::FillValues<int32>(&expected_expand_dims_tensor2, {-1}); test::ExpectTensorEqual<int32>( expected_expand_dims_tensor2, GetNodeTensorAttr(*(node_lookup.at("gather2/ExpandDims/Const")), "value")); EXPECT_EQ(1, node_lookup.count("gather2")); EXPECT_EQ("ExpandDims", node_lookup.at("gather2")->op()); EXPECT_EQ("w1/part_1/HashTable", node_lookup.at("w1/part_1/InitializeTable")->input(0)); EXPECT_EQ("w1/part_1/indices", node_lookup.at("w1/part_1/InitializeTable")->input(1)); EXPECT_EQ("w1/part_1/values", node_lookup.at("w1/part_1/InitializeTable")->input(2)); EXPECT_EQ("w2/part_1/HashTable", node_lookup.at("w2/part_1/InitializeTable")->input(0)); EXPECT_EQ("w2/part_1/indices", node_lookup.at("w2/part_1/InitializeTable")->input(1)); EXPECT_EQ("w2/part_1/values", node_lookup.at("w2/part_1/InitializeTable")->input(2)); EXPECT_EQ("w1/part_1/HashTable", node_lookup.at("gather1/LookupTableFind")->input(0)); EXPECT_EQ("ids", node_lookup.at("gather1/LookupTableFind")->input(1)); EXPECT_EQ("gather1/Const", node_lookup.at("gather1/LookupTableFind")->input(2)); EXPECT_EQ("gather1/LookupTableFind", node_lookup.at("gather1")->input(0)); EXPECT_EQ("w2/part_1/HashTable", node_lookup.at("gather2/LookupTableFind")->input(0)); EXPECT_EQ("ids", node_lookup.at("gather2/LookupTableFind")->input(1)); EXPECT_EQ("gather2/Const", 
node_lookup.at("gather2/LookupTableFind")->input(2)); EXPECT_EQ("gather2/LookupTableFind", node_lookup.at("gather2")->input(0)); EXPECT_EQ(0, node_lookup.count("linear/concat/axis")); EXPECT_EQ(0, node_lookup.count("concat/node")); EXPECT_EQ(2, node_lookup.at(shared_init_name)->input_size()); EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(), node_lookup.at(shared_init_name)->input().end(), "^w1/part_1/InitializeTable"), node_lookup.at(shared_init_name)->input().end()); EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(), node_lookup.at(shared_init_name)->input().end(), "^w2/part_1/InitializeTable"), node_lookup.at(shared_init_name)->input().end()); } void TestReadTensorSlice() { const auto checkpoint_path = io::JoinPath(testing::TmpDir(), "checkpoint_slice"); Tensor weights(DT_FLOAT, TensorShape({2, 1})); test::FillValues<float>(&weights, {0.2, 0.000001}); BundleWriter writer(Env::Default(), checkpoint_path); TF_ASSERT_OK(writer.AddSlice("w", TensorShape({4, 1}), TensorSlice::ParseOrDie("0,2:0,1"), weights)); TF_ASSERT_OK(writer.Finish()); std::unique_ptr<BundleReader> reader( new BundleReader(Env::Default(), checkpoint_path)); Tensor results; TF_ASSERT_OK( ReadTensorFromCheckpoint("w/part_0", reader, "4 1 0,2:0,1", &results)); test::ExpectTensorEqual<float>(weights, results); } }; TEST_F(SparsifyGatherTest, TestSinglePartition) { TestSinglePartition(false, false, false, false); TestSinglePartition(false, true, false, false); TestSinglePartition(true, false, false, false); TestSinglePartition(true, true, false, false); TestSinglePartition(false, false, true, false); TestSinglePartition(false, true, true, false); TestSinglePartition(true, false, true, false); TestSinglePartition(true, true, true, false); TestSinglePartition(false, true, false, false, "shared_inits"); TestSinglePartition(true, true, false, false, "shared_inits"); TestSinglePartition(false, true, true, false, "shared_inits"); TestSinglePartition(true, true, true, 
false, "shared_inits"); TestSinglePartition(false, false, false, true); TestSinglePartition(false, true, false, true); TestSinglePartition(true, false, false, true); TestSinglePartition(true, true, false, true); TestSinglePartition(false, false, true, true); TestSinglePartition(false, true, true, true); TestSinglePartition(true, false, true, true); TestSinglePartition(true, true, true, true); TestSinglePartition(false, true, false, true, "shared_inits"); TestSinglePartition(true, true, false, true, "shared_inits"); TestSinglePartition(false, true, true, true, "shared_inits"); TestSinglePartition(true, true, true, true, "shared_inits"); } TEST_F(SparsifyGatherTest, TestMultiPartition) { TestMultiPartition(false, false, false); TestMultiPartition(false, true, false); TestMultiPartition(true, false, false); TestMultiPartition(true, true, false); TestMultiPartition(false, false, true); TestMultiPartition(false, true, true); TestMultiPartition(true, false, true); TestMultiPartition(true, true, true); TestMultiPartition(false, true, false, "shared_inits"); TestMultiPartition(true, true, false, "shared_inits"); TestMultiPartition(false, true, true, "shared_inits"); TestMultiPartition(true, true, true, "shared_inits"); } TEST_F(SparsifyGatherTest, TestTensorSlice) { TestReadTensorSlice(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sparsify_gather.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sparsify_gather_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
069a3ccd-321a-4f16-867b-1bd9d0c811d6
cpp
tensorflow/tensorflow
rename_op
tensorflow/tools/graph_transforms/rename_op.cc
tensorflow/tools/graph_transforms/rename_op_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/fold_constants_lib.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameOp(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { if (!context.params.count("old_op_name") || (context.params.at("old_op_name").size() != 1) || !context.params.count("new_op_name") || (context.params.at("new_op_name").size() != 1)) { return errors::InvalidArgument( "rename_op expects exactly one 'old_op_name' and 'new_op_name' " "argument, e.g. rename_op(old_op_name=Mul, new_op_name=Multiply)"); } const string old_op_name = context.params.at("old_op_name")[0]; const string new_op_name = context.params.at("new_op_name")[0]; output_graph_def->Clear(); for (const NodeDef& node : input_graph_def.node()) { NodeDef* new_node = output_graph_def->mutable_node()->Add(); *new_node = node; if (node.op() == old_op_name) { new_node->set_op(new_op_name); } } return OkStatus(); } REGISTER_GRAPH_TRANSFORM("rename_op", RenameOp); } }
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status RenameOp(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class RenameOpTest : public ::testing::Test { protected: void TestRenameOp() { GraphDef graph_def; NodeDef* mul_node1 = graph_def.add_node(); mul_node1->set_name("mul_node1"); mul_node1->set_op("Mul"); mul_node1->add_input("add_node2"); mul_node1->add_input("add_node3"); NodeDef* add_node2 = graph_def.add_node(); add_node2->set_name("add_node2"); add_node2->set_op("Add"); add_node2->add_input("const_node1"); add_node2->add_input("const_node2"); NodeDef* add_node3 = graph_def.add_node(); add_node3->set_name("add_node3"); add_node3->set_op("Add"); add_node3->add_input("const_node1"); add_node3->add_input("const_node3"); NodeDef* const_node1 = graph_def.add_node(); const_node1->set_name("const_node1"); const_node1->set_op("Const"); NodeDef* const_node2 = graph_def.add_node(); const_node2->set_name("const_node2"); const_node2->set_op("Const"); NodeDef* const_node3 = graph_def.add_node(); const_node3->set_name("const_node3"); const_node3->set_op("Const"); NodeDef* add_node4 = graph_def.add_node(); add_node4->set_name("add_node4"); add_node4->set_op("Add"); add_node4->add_input("add_node2"); add_node4->add_input("add_node3"); GraphDef result; TransformFuncContext context; context.input_names = {}; context.output_names = {"mul_node1"}; context.params.insert(std::pair<string, std::vector<string>>( 
{"old_op_name", {string("Mul")}})); context.params.insert(std::pair<string, std::vector<string>>( {"new_op_name", {string("Multiply")}})); TF_ASSERT_OK(RenameOp(graph_def, context, &result)); std::map<string, const NodeDef*> node_lookup; MapNamesToNodes(result, &node_lookup); EXPECT_EQ(1, node_lookup.count("mul_node1")); EXPECT_EQ("Multiply", node_lookup.at("mul_node1")->op()); EXPECT_EQ(1, node_lookup.count("add_node2")); EXPECT_EQ("Add", node_lookup.at("add_node2")->op()); EXPECT_EQ(1, node_lookup.count("add_node3")); EXPECT_EQ("Add", node_lookup.at("add_node3")->op()); EXPECT_EQ(1, node_lookup.count("add_node4")); EXPECT_EQ("Add", node_lookup.at("add_node4")->op()); EXPECT_EQ(1, node_lookup.count("const_node1")); EXPECT_EQ("Const", node_lookup.at("const_node1")->op()); EXPECT_EQ(1, node_lookup.count("const_node2")); EXPECT_EQ("Const", node_lookup.at("const_node2")->op()); EXPECT_EQ(1, node_lookup.count("const_node3")); EXPECT_EQ("Const", node_lookup.at("const_node3")->op()); } }; TEST_F(RenameOpTest, TestRenameOp) { TestRenameOp(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0f6c670e-b625-4ea8-843d-9d8f3e1fbe43
cpp
tensorflow/tensorflow
string_split_op
tensorflow/core/kernels/string_split_op.cc
tensorflow/core/kernels/string_split_op_test.cc
#include <string> #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/strings/str_util.h" namespace tensorflow { namespace { template <typename Predicate> std::vector<StringPiece> SplitOnChar(const tstring& str, const char delim, Predicate p) { std::vector<StringPiece> result; StringPiece text(str); auto f = text.find(delim); while (f != StringPiece::npos) { StringPiece token = text.substr(0, f); if (p(token)) { result.emplace_back(token); } text.remove_prefix(f + 1); f = text.find(delim); } if (p(text)) { result.push_back(text); } return result; } template <typename Predicate> std::vector<StringPiece> SplitOnCharSet(const tstring& str, const tstring& delim_set, Predicate p) { std::vector<StringPiece> result; StringPiece text(str); StringPiece delims(delim_set); size_t token_start = 0; for (size_t i = 0; i < text.size() + 1; i++) { if ((i == text.size()) || (delims.find(text[i]) != StringPiece::npos)) { StringPiece token(text.data() + token_start, i - token_start); if (p(token)) { result.emplace_back(token); } token_start = i + 1; } } return result; } template <typename Predicate> std::vector<StringPiece> Split(const tstring& str, const tstring& delimiter, Predicate predicate) { if (str.empty()) { return std::vector<StringPiece>(); } if (delimiter.empty()) { std::vector<StringPiece> result; result.resize(str.size()); for (size_t i = 0; i < str.size(); ++i) { result[i] = StringPiece(str.data() + i, 1); } return result; } if (delimiter.size() == 1) { return SplitOnChar(str, delimiter[0], predicate); } return SplitOnCharSet(str, delimiter, predicate); } std::vector<StringPiece> SplitV2(const tstring& str, StringPiece sep, int maxsplit) { std::vector<StringPiece> result; StringPiece text(str); if 
(maxsplit == 0) { result.emplace_back(text); return result; } if (sep.empty()) { StringPiece token; str_util::RemoveLeadingWhitespace(&text); int split = 0; while (str_util::ConsumeNonWhitespace(&text, &token)) { result.push_back(token); str_util::RemoveLeadingWhitespace(&text); ++split; if (maxsplit > 0 && split == maxsplit) { result.push_back(text); return result; } } return result; } auto p = std::search(text.begin(), text.end(), sep.begin(), sep.end()); int split = 0; while (p != text.end()) { StringPiece token = text.substr(0, p - text.begin()); result.push_back(token); text.remove_prefix(token.size()); text.remove_prefix(sep.size()); ++split; if (maxsplit > 0 && split == maxsplit) { result.push_back(StringPiece(text)); return result; } p = std::search(text.begin(), text.end(), sep.begin(), sep.end()); } result.push_back(text); return result; } } class StringSplitOp : public OpKernel { public: explicit StringSplitOp(OpKernelConstruction* context) : OpKernel(context), skip_empty_(true) { bool skip_empty; if (context->GetAttr("skip_empty", &skip_empty).ok()) { skip_empty_ = skip_empty; } } void Compute(OpKernelContext* ctx) override { const Tensor* input_tensor; OP_REQUIRES_OK(ctx, ctx->input("input", &input_tensor)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(input_tensor->shape()), errors::InvalidArgument("input must be a vector, got shape: ", input_tensor->shape().DebugString())); const auto input_vec = input_tensor->vec<tstring>(); const int64_t batch_size = input_vec.dimension(0); const Tensor* delimiter_tensor; OP_REQUIRES_OK(ctx, ctx->input("delimiter", &delimiter_tensor)); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(delimiter_tensor->shape()), errors::InvalidArgument("delimiter must be a scalar, got shape: ", delimiter_tensor->shape().DebugString())); const auto delimiter_vec = delimiter_tensor->flat<tstring>(); const tstring& delimiter = delimiter_vec(0); std::vector<StringPiece> tokens; static constexpr int kReserveSize = 4; tokens.reserve(batch_size 
* kReserveSize); int64_t output_size = 0; int64_t max_num_entries = 0; std::vector<int64_t> num_indices(batch_size); for (int64_t i = 0; i < batch_size; ++i) { std::vector<StringPiece> parts = skip_empty_ ? Split(input_vec(i), delimiter, str_util::SkipEmpty()) : Split(input_vec(i), delimiter, str_util::AllowEmpty()); int64_t n_entries = parts.size(); num_indices[i] = n_entries; output_size += n_entries; max_num_entries = std::max(max_num_entries, n_entries); tokens.insert(tokens.end(), std::make_move_iterator(parts.begin()), std::make_move_iterator(parts.end())); } Tensor* sp_indices_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({output_size, 2}), &sp_indices_t)); Tensor* sp_tokens_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({output_size}), &sp_tokens_t)); Tensor* sp_shape_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({2}), &sp_shape_t)); auto sp_indices = sp_indices_t->matrix<int64_t>(); auto sp_tokens = sp_tokens_t->vec<tstring>(); auto sp_shape = sp_shape_t->vec<int64_t>(); sp_shape(0) = batch_size; sp_shape(1) = max_num_entries; size_t c = 0; for (size_t i = 0; i < batch_size; ++i) { for (size_t j = 0; j < num_indices[i]; ++j) { sp_indices(c, 0) = i; sp_indices(c, 1) = j; sp_tokens(c).assign(tokens[c].data(), tokens[c].size()); ++c; } } } private: bool skip_empty_; }; class StringSplitV2Op : public OpKernel { public: explicit StringSplitV2Op(OpKernelConstruction* context) : OpKernel(context), maxsplit_(-1) { OP_REQUIRES_OK(context, context->GetAttr("maxsplit", &maxsplit_)); } void Compute(OpKernelContext* ctx) override { const Tensor* input_tensor; OP_REQUIRES_OK(ctx, ctx->input("input", &input_tensor)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(input_tensor->shape()), errors::InvalidArgument("input must be a vector, got shape: ", input_tensor->shape().DebugString())); const auto input_vec = input_tensor->vec<tstring>(); const int64_t batch_size = input_vec.dimension(0); const Tensor* sep_tensor; OP_REQUIRES_OK(ctx, 
ctx->input("sep", &sep_tensor)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(sep_tensor->shape()), errors::InvalidArgument("sep must be a scalar, got shape: ", sep_tensor->shape().DebugString())); const auto sep_vec = sep_tensor->flat<tstring>(); StringPiece sep(sep_vec(0)); std::vector<StringPiece> tokens; static constexpr int kReserveSize = 4; tokens.reserve(batch_size * kReserveSize); int64_t output_size = 0; int64_t max_num_entries = 0; std::vector<int64_t> num_indices(batch_size); for (int64_t i = 0; i < batch_size; ++i) { std::vector<StringPiece> parts = SplitV2(input_vec(i), sep, maxsplit_); int64_t n_entries = parts.size(); num_indices[i] = n_entries; output_size += n_entries; max_num_entries = std::max(max_num_entries, n_entries); tokens.insert(tokens.end(), parts.begin(), parts.end()); } Tensor* sp_indices_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({output_size, 2}), &sp_indices_t)); Tensor* sp_tokens_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({output_size}), &sp_tokens_t)); Tensor* sp_shape_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({2}), &sp_shape_t)); auto sp_indices = sp_indices_t->matrix<int64_t>(); auto sp_tokens = sp_tokens_t->vec<tstring>(); auto sp_shape = sp_shape_t->vec<int64_t>(); sp_shape(0) = batch_size; sp_shape(1) = max_num_entries; size_t c = 0; for (size_t i = 0; i < batch_size; ++i) { for (size_t j = 0; j < num_indices[i]; ++j) { sp_indices(c, 0) = i; sp_indices(c, 1) = j; sp_tokens(c).assign(tokens[c].data(), tokens[c].size()); ++c; } } } private: int maxsplit_; }; REGISTER_KERNEL_BUILDER(Name("StringSplit").Device(DEVICE_CPU), StringSplitOp); REGISTER_KERNEL_BUILDER(Name("StringSplitV2").Device(DEVICE_CPU), StringSplitV2Op); }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { const char* lines[] = { "**TensorFlow** is an open source software library for numerical " "computation using data flow graphs.", "The graph nodes represent mathematical operations, while the graph edges " "represent the multidimensional data arrays (tensors) that flow between " "them.", "This flexible architecture enables you to deploy computation to one or " "more CPUs or GPUs in a desktop, server, or mobile device without " "rewriting code.", "TensorFlow also includes " "[TensorBoard](https: "summaries_and_tensorboard), a data visualization toolkit.", "TensorFlow was originally developed by researchers and engineers working " "on the Google Brain team within Google's Machine Intelligence Research " "organization for the purposes of conducting machine learning and deep " "neural networks research.", "The system is general enough to be applicable in a wide variety of other " "domains, as well.", "TensorFlow provides stable Python API and C APIs as well as without API " "backwards compatibility guarantee like C++, Go, Java, JavaScript and " "Swift."}; Tensor GetTestTensor(int batch) { const int sz = TF_ARRAYSIZE(lines); Tensor t(DT_STRING, {batch}); auto s = t.flat<tstring>(); for (int i = 0; i 
< batch; ++i) { s(i) = lines[i % sz]; } return t; } Graph* SetupStringSplitGraph(const Tensor& input) { Graph* g = new Graph(OpRegistry::Global()); Tensor delim(DT_STRING, TensorShape({})); delim.flat<tstring>().setConstant(" "); TF_CHECK_OK(NodeBuilder("string_split_op", "StringSplit") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, delim)) .Finalize(g, nullptr )); return g; } static void BM_StringSplit(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestTensor(batch_size); Graph* g = SetupStringSplitGraph(input); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_StringSplit) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); Graph* SetupStringSplitV2Graph(const Tensor& input) { Graph* g = new Graph(OpRegistry::Global()); Tensor sep(DT_STRING, TensorShape({})); sep.flat<tstring>().setConstant(" "); TF_CHECK_OK(NodeBuilder("string_split_op", "StringSplitV2") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, sep)) .Finalize(g, nullptr )); return g; } static void BM_StringSplitV2(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestTensor(batch_size); Graph* g = SetupStringSplitV2Graph(input); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_StringSplitV2) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_split_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_split_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b4765acf-7156-479d-b560-41b2e7874fdd
cpp
tensorflow/tensorflow
tensor_array
tensorflow/lite/kernels/variants/tensor_array.cc
tensorflow/lite/kernels/variants/tensor_array_test.cc
#include "tensorflow/lite/kernels/variants/tensor_array.h" #include <cstring> #include "tensorflow/lite/array.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/util.h" namespace tflite { namespace variants { TensorArray::TensorArray(const TensorArray& other) { TfLiteIntArray* copied_shape = TfLiteIntArrayCopy(other.element_shape_.get()); element_shape_ = IntArrayUniquePtr(copied_shape); element_type_ = other.element_type_; num_elements_ = other.num_elements_; elements_ = (RefCountedTensor*)malloc(sizeof(RefCountedTensor) * other.num_elements_); other.AssignBuffer(elements_); } TensorArray& TensorArray::operator=(const TensorArray& other) { TfLiteIntArray* copied_shape = TfLiteIntArrayCopy(other.element_shape_.get()); element_shape_ = IntArrayUniquePtr(copied_shape); Resize(other.num_elements_); Clear(); other.AssignBuffer(elements_); return *this; } void TensorArray::Resize(int num_elements) { if (num_elements == NumElements() || num_elements < 0) return; if (num_elements > NumElements()) { elements_ = (RefCountedTensor*)realloc( elements_, num_elements * sizeof(RefCountedTensor)); for (int i = NumElements(); i < num_elements; ++i) { elements_[i].count = nullptr; elements_[i].tensor = nullptr; } } else { for (int i = num_elements; i < NumElements(); ++i) { Drop(i); } elements_ = (RefCountedTensor*)realloc( elements_, num_elements * sizeof(RefCountedTensor)); } num_elements_ = num_elements; } const TfLiteTensor* TensorArray::At(int index) const { if (index < 0 || index >= NumElements()) { return nullptr; } return elements_[index].tensor; } bool TensorArray::Set(int index, TensorUniquePtr tensor) { if (index < 0 || index >= NumElements()) { return false; } Drop(index); int* c = (int*)malloc(sizeof(int)); *c = 1; elements_[index].tensor = tensor.release(); elements_[index].count = c; return true; } TensorArray::~TensorArray() { Clear(); free(elements_); elements_ = nullptr; } void TensorArray::Drop(int i) { RefCountedTensor* t = elements_ + i; int* 
count = t->count; if (count == nullptr) { return; } if (*count == 1) { TfLiteTensorFree(t->tensor); free(t->tensor); free(t->count); t->tensor = nullptr; t->count = nullptr; return; } (*count)--; } void TensorArray::Clear() { for (int i = 0; i < num_elements_; ++i) { Drop(i); } } void TensorArray::AssignBuffer(RefCountedTensor* dst) const { std::memcpy(dst, elements_, sizeof(RefCountedTensor) * num_elements_); for (int i = 0; i < num_elements_; ++i) { if (dst[i].count == nullptr) { continue; } (*dst[i].count)++; } } } }
#include "tensorflow/lite/kernels/variants/tensor_array.h" #include <memory> #include <numeric> #include <optional> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/array.h" #include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/core/c/c_api_types.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/portable_type_to_tflitetype.h" #include "tensorflow/lite/util.h" namespace tflite { namespace variants { namespace { template <typename T> TensorUniquePtr MakeTensorWithData(std::vector<int> dims, const std::vector<T>& data) { TensorUniquePtr tensor = BuildTfLiteTensor(typeToTfLiteType<T>(), dims, kTfLiteDynamic); const int num_elements = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>()); T* data_start = (T*)tensor->data.data; for (int i = 0; i < num_elements; ++i) { data_start[i] = data[i]; } return tensor; } TensorArray MakeTensorArrayForTest(const std::vector<int>& dims) { return TensorArray(kTfLiteInt32, BuildTfLiteArray(dims)); } TEST(TensorArrayTest, InsertSingleElement) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4}))); const TfLiteTensor* added_tensor = arr.At(0); ASSERT_TRUE(added_tensor != nullptr); ASSERT_THAT(added_tensor, DimsAre({2})); EXPECT_EQ(added_tensor->data.i32[0], 3); EXPECT_EQ(added_tensor->data.i32[1], 4); } TEST(TensorArrayTest, ResizeToZero) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); EXPECT_EQ(arr.NumElements(), 2); arr.Resize(0); EXPECT_EQ(arr.NumElements(), 0); } TEST(TensorArrayTest, InsertOOB) { auto arr = MakeTensorArrayForTest({}); TensorUniquePtr tensor = MakeTensorWithData<int>({2}, {3, 4}); arr.Resize(1); ASSERT_FALSE(arr.Set(-1, std::move(tensor))); EXPECT_FALSE(arr.At(0)); } TEST(TensorArrayTest, InsertMultipleElements) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); 
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4}))); EXPECT_TRUE(arr.Set(1, MakeTensorWithData<int>({3}, {3, 4, 5}))); EXPECT_THAT(arr.At(0), DimsAre({2})); EXPECT_THAT(arr.At(1), DimsAre({3})); } TEST(TensorArrayTest, InsertSameIndexTwiceDeletes) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 2}))); EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({3}, {3, 4, 5}))); EXPECT_THAT(arr.At(0), DimsAre({3})); } TEST(TensorArrayTest, ResizeUpWithElements) { auto arr = MakeTensorArrayForTest({}); arr.Resize(1); ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4}))); arr.Resize(2); EXPECT_THAT(arr.At(0), DimsAre({2})); EXPECT_FALSE(arr.At(1)); EXPECT_EQ(arr.NumElements(), 2); } TEST(TensorArrayTest, ResizeDownDeletesElements) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); ASSERT_TRUE(arr.Set(1, MakeTensorWithData<int>({2}, {3, 4}))); arr.Resize(1); EXPECT_EQ(arr.NumElements(), 1); EXPECT_FALSE(arr.At(0)); } TEST(TensorArrayTest, CopyListWithZeroLength) { auto arr = MakeTensorArrayForTest({}); TensorArray arr2{arr}; EXPECT_EQ(arr.NumElements(), arr2.NumElements()); EXPECT_EQ(arr.NumElements(), 0); } TEST(TensorArrayTest, CopyAssignListWithZeroLength) { auto arr = MakeTensorArrayForTest({}); arr = MakeTensorArrayForTest({2, 2}); EXPECT_EQ(arr.NumElements(), 0); EXPECT_THAT(arr.ElementShape(), DimsAre({2, 2})); } TEST(TensorArrayTest, CopyEmptyList) { auto arr = MakeTensorArrayForTest({}); arr.Resize(2); TensorArray arr2{arr}; EXPECT_EQ(arr.NumElements(), arr2.NumElements()); EXPECT_EQ(arr.NumElements(), 2); } TEST(TensorArrayTest, CopyAssignToEmptyList) { auto arr = MakeTensorArrayForTest({}); auto target_arr = MakeTensorArrayForTest({2, 2}); target_arr.Resize(2); target_arr = arr; EXPECT_EQ(target_arr.NumElements(), 0); EXPECT_THAT(target_arr.ElementShape(), DimsAre({})); } TEST(TensorArrayTest, CopyListWithItem) { std::optional<TensorArray> arr = TensorArray(kTfLiteInt32, {}); 
arr->Resize(1); ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4}))); TensorArray arr2{*arr}; EXPECT_EQ(arr->NumElements(), arr2.NumElements()); EXPECT_EQ(arr->At(0), arr2.At(0)); arr.reset(); EXPECT_THAT(arr2.At(0), DimsAre({2})); } TEST(TensorArrayTest, CopyAssignToListWithItem) { auto target_arr = MakeTensorArrayForTest({}); target_arr.Resize(2); ASSERT_TRUE(target_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4}))); auto src_arr = MakeTensorArrayForTest({2, 2}); src_arr.Resize(1); target_arr = src_arr; EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements()); EXPECT_EQ(target_arr.At(0), nullptr); } TEST(TensorArrayTest, CopyAssignFromListWithItem) { auto target_arr = MakeTensorArrayForTest({2, 2}); target_arr.Resize(1); auto src_arr = MakeTensorArrayForTest({}); src_arr.Resize(2); ASSERT_TRUE(src_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4}))); target_arr = src_arr; EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements()); EXPECT_EQ(src_arr.At(0), target_arr.At(0)); } TEST(TensorArrayTest, DeleteEmptyTensorArray) { TensorArray* arr = new TensorArray{kTfLiteInt32, {}}; delete arr; } TEST(TensorArrayTest, DeleteResizedEmptyTensorArray) { TensorArray* arr = new TensorArray{kTfLiteInt32, {}}; arr->Resize(2); delete arr; } TEST(OpaqueVariantTensorArrayDataTest, CastThroughVoidAndCopy) { TensorArray* arr = new TensorArray{kTfLiteFloat32, {}}; arr->Resize(2); ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4}))); void* erased = static_cast<VariantData*>(arr); VariantData* d = static_cast<VariantData*>(erased); VariantData* copied_d = d->CloneTo(nullptr); auto* copied_arr = static_cast<TensorArray*>(copied_d); ASSERT_THAT(copied_arr->At(0), DimsAre({2})); ASSERT_THAT(arr->At(0), DimsAre({2})); ASSERT_EQ(arr->At(0), arr->At(0)); delete d; delete copied_d; } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/tensor_array.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/tensor_array_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1511abd5-098d-46f7-9426-d0179c2675d1
cpp
tensorflow/tensorflow
scatter_nd_op
tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc
tensorflow/core/kernels/scatter_nd_op_test.cc
#include <functional> #include "absl/status/status.h" #include "tensorflow/compiler/tf2xla/lib/scatter.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { namespace { Status ValidateUpdateShape(const TensorShape& buffer_shape, const TensorShape& indices_shape, const TensorShape& updates_shape, bool broadcast_scalar_update) { if (indices_shape.dims() < 1) { return errors::InvalidArgument( "indices shape must have >= 1 dimension; got ", indices_shape.DebugString()); } const int64_t num_index_dims = indices_shape.dim_size(indices_shape.dims() - 1); const int64_t batch_dim = indices_shape.dims() - 1; auto shape_err = [&]() { return errors::InvalidArgument( "Must have updates.shape = indices.shape[:batch_dim] + ", "buffer_shape[num_index_dims:], got updates.shape: ", updates_shape.DebugString(), ", indices.shape: ", indices_shape.DebugString(), ", buffer_shape: ", buffer_shape.DebugString(), ", num_index_dims: ", num_index_dims, ", and batch_dim: ", batch_dim); }; if (updates_shape.dims() == 0 && broadcast_scalar_update) { return absl::OkStatus(); } if (updates_shape.dims() < batch_dim) return shape_err(); if (buffer_shape.dims() < num_index_dims + (updates_shape.dims() - batch_dim)) { return shape_err(); } if (updates_shape.dims() != batch_dim + buffer_shape.dims() - num_index_dims) { return shape_err(); } for (int d = 0; d < batch_dim; ++d) { if (updates_shape.dim_size(d) != indices_shape.dim_size(d)) { return shape_err(); } } for (int d = 0; d < updates_shape.dims() - batch_dim; 
++d) { if (updates_shape.dim_size(d + batch_dim) != buffer_shape.dim_size(d + num_index_dims)) { return shape_err(); } } return absl::OkStatus(); } class ScatterNdOp : public XlaOpKernel { public: explicit ScatterNdOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { DataType dtype = context->input_type(1); TensorShape indices_shape = context->InputShape(0); TensorShape updates_shape = context->InputShape(1); TensorShape buffer_shape; OP_REQUIRES_OK(context, context->ConstantInputAsShape(2, &buffer_shape)); OP_REQUIRES( context, TensorShapeUtils::IsVectorOrHigher(buffer_shape), errors::InvalidArgument("Output must be at least 1-D, ", "got shape: ", buffer_shape.DebugString())); OP_REQUIRES( context, buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 && updates_shape.num_elements() == 0), errors::InvalidArgument( "Indices and updates specified for empty output. indices shape: ", indices_shape.DebugString())); OP_REQUIRES_OK( context, ValidateUpdateShape(buffer_shape, indices_shape, updates_shape, false)); xla::XlaBuilder* builder = context->builder(); auto buffer = xla::Broadcast(XlaHelpers::Zero(builder, dtype), buffer_shape.dim_sizes()); auto indices = context->Input(0); auto updates = context->Input(1); auto combine = context->input_xla_type(1) == xla::PRED ? 
CombineBool : CombineNum; auto result = XlaScatter(buffer, updates, indices, true, false, combine, builder); OP_REQUIRES_OK(context, result.status()); context->SetOutput(0, result.value()); } private: static xla::XlaOp CombineNum(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { (void)builder; return xla::Add(x, y); } static xla::XlaOp CombineBool(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { (void)builder; return xla::Or(x, y); } }; REGISTER_XLA_OP(Name("ScatterNd").CompileTimeConstantInput("shape"), ScatterNdOp); void CompileTensorScatter( XlaOpKernelContext* context, const std::function<xla::XlaOp(xla::XlaOp, xla::XlaOp, xla::XlaBuilder*)>& combiner, bool broadcast_scalar_update) { TensorShape buffer_shape = context->InputShape(0); TensorShape indices_shape = context->InputShape(1); TensorShape updates_shape = context->InputShape(2); OP_REQUIRES( context, TensorShapeUtils::IsVectorOrHigher(buffer_shape), errors::InvalidArgument("Output must be at least 1-D, ", "got shape: ", buffer_shape.DebugString())); OP_REQUIRES( context, buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 && updates_shape.num_elements() == 0), errors::InvalidArgument( "Indices and updates specified for empty output. 
indices shape: ", indices_shape.DebugString())); OP_REQUIRES_OK(context, ValidateUpdateShape(buffer_shape, indices_shape, updates_shape, broadcast_scalar_update)); xla::XlaBuilder* builder = context->builder(); auto buffer = context->Input(0); auto indices = context->Input(1); auto updates = context->Input(2); auto result = XlaScatter(buffer, updates, indices, true, false, combiner, builder); OP_REQUIRES_OK(context, result.status()); context->SetOutput(0, result.value()); } class TensorScatterAddOp : public XlaOpKernel { public: explicit TensorScatterAddOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { CompileTensorScatter( context, [](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) { return xla::Add(x, y); }, true); } }; class TensorScatterMaxOp : public XlaOpKernel { public: explicit TensorScatterMaxOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { CompileTensorScatter( context, [](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) { return xla::Max(x, y); }, false); } }; class TensorScatterMinOp : public XlaOpKernel { public: explicit TensorScatterMinOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { CompileTensorScatter( context, [](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) { return xla::Min(x, y); }, false); } }; class TensorScatterSubOp : public XlaOpKernel { public: explicit TensorScatterSubOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { CompileTensorScatter( context, [](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) { return xla::Sub(x, y); }, false); } }; class TensorScatterUpdateOp : public XlaOpKernel { public: explicit TensorScatterUpdateOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { CompileTensorScatter( context, [](xla::XlaOp, xla::XlaOp y, 
xla::XlaBuilder*) { return y; }, true); } }; REGISTER_XLA_OP(Name("TensorScatterAdd"), TensorScatterAddOp); REGISTER_XLA_OP(Name("TensorScatterMax"), TensorScatterMaxOp); REGISTER_XLA_OP(Name("TensorScatterMin"), TensorScatterMinOp); REGISTER_XLA_OP(Name("TensorScatterSub"), TensorScatterSubOp); REGISTER_XLA_OP(Name("TensorScatterUpdate"), TensorScatterUpdateOp); } }
#include <cstdint> #include <functional> #include <memory> #include <vector> #include "absl/status/status.h" #include "absl/strings/match.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class TensorScatterUpdateOpTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate") .Input(FakeInput(variable_type)) .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(TensorScatterUpdateOpTest, Simple_TwoD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 
0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(TensorScatterUpdateOpTest, Simple_Two64) { MakeOp(DT_FLOAT, DT_INT64); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int64_t>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(TensorScatterUpdateOpTest, Simple_ZeroD) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({1, 1}), {3}); AddInputFromArray<float>(TensorShape({1, 1}), {101}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {0, 0, 0, 101, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(TensorScatterUpdateOpTest, Simple_OneD) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {100, 0, 102, 0, 101}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(TensorScatterUpdateOpTest, HigherRank) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6}); AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({8})); 
test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(TensorScatterUpdateOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [99] does not index into shape [5,3]")) << s; } class TensorScatterUpdateOpErrorOnBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate") .Input(FakeInput(variable_type)) .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) .Attr("bad_indices_policy", "ERROR") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(TensorScatterUpdateOpErrorOnBadIndicesTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [99] does not index into shape [5,3]")) << s; } class TensorScatterUpdateOpIgnoreBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate") .Input(FakeInput(variable_type)) .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) .Attr("bad_indices_policy", "IGNORE") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(TensorScatterUpdateOpIgnoreBadIndicesTest, DropOutOfRangeIndices) { MakeOp(DT_FLOAT, DT_INT32); 
AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {100, 0, 102, 0, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } class ScatterNdUpdateOpTest : public OpsTestBase { protected: void MakeOp(DataType variable_ref_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate") .Input(FakeInput(variable_ref_type)) .Input(FakeInput(index_type)) .Input(FakeInput(RemoveRefType(variable_ref_type))) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdUpdateOpTest, Simple_TwoD32) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterNdUpdateOpTest, Simple_Two64) { MakeOp(DT_FLOAT_REF, DT_INT64); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int64_t>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, 
params_tensor); } TEST_F(ScatterNdUpdateOpTest, Simple_ZeroD) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({1}), {3}); AddInputFromArray<float>(TensorShape({1}), {101}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {0, 0, 0, 101, 0}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterNdUpdateOpTest, Simple_OneD) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3}), {100, 101, 102}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {100, 0, 102, 0, 101}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterNdUpdateOpTest, HigherRank) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6}); AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({8})); test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterNdUpdateOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [99] 
does not index into shape [5,3]")) << s; } TEST_F(ScatterNdUpdateOpTest, Error_WrongDimsIndices) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({1, 3, 1}), {0, 4, 99}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "Dimensions [0,1) of indices[shape=[1,3,1]] = 1 must match dimensions " "[0,1) of updates[shape=[3,3]] = 3")) << s; } TEST_F(ScatterNdUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>( TensorShape({3, 4}), {100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "Dimensions [1,2) of input[shape=[5,3]] must match dimensions [1,2) of " "updates[shape=[3,4]]")) << s; } TEST_F(ScatterNdUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({2, 3}), {100, 101, 102, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "Dimensions [0,1) of indices[shape=[3,1]] = 3 must match dimensions [0,1)" " of updates[shape=[2,3]] = 2")) << s; } class ScatterNdUpdateOpErrorOnBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_ref_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate") .Input(FakeInput(variable_ref_type)) .Input(FakeInput(index_type)) .Input(FakeInput(RemoveRefType(variable_ref_type))) .Attr("bad_indices_policy", "ERROR") 
.Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdUpdateOpErrorOnBadIndicesTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [99] does not index into shape [5,3]")) << s; } class ScatterNdUpdateOpIgnoreBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_ref_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate") .Input(FakeInput(variable_ref_type)) .Input(FakeInput(index_type)) .Input(FakeInput(RemoveRefType(variable_ref_type))) .Attr("bad_indices_policy", "IGNORE") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdUpdateOpIgnoreBadIndicesTest, DropOutOfRangeIndices) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {100, 0, 102, 0, 0}); test::ExpectTensorEqual<float>(expected, *mutable_input(0).tensor); } class ScatterNdUpdateOpConstructionTest : public OpsTestBase {}; TEST_F(ScatterNdUpdateOpConstructionTest, Error_BadIndicesPolicyInvalid) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY") .Finalize(node_def())); EXPECT_NE(InitOp(), absl::OkStatus()); } class ScatterNdOpTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", 
"ScatterNd") .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) .Input(FakeInput(DT_INT32)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdOpTest, Simple_OneD) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); AddInputFromArray<int32>(TensorShape({2}), {5, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {100, 0, 102, 0, 101}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(ScatterNdOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); AddInputFromArray<int32>(TensorShape({2}), {5, 1}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [5] does not index into shape [5,1]")) << s; } class ScatterNdOpErrorOnBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd") .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) .Input(FakeInput(DT_INT32)) .Attr("bad_indices_policy", "ERROR") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdOpErrorOnBadIndicesTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); AddInputFromArray<int32>(TensorShape({2}), {5, 1}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "indices[1] = [5] does not index into shape [5,1]")) << s; } class ScatterNdOpIgnoreBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType variable_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd") .Input(FakeInput(index_type)) .Input(FakeInput(variable_type)) 
.Input(FakeInput(DT_INT32)) .Attr("bad_indices_policy", "IGNORE") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterNdOpIgnoreBadIndicesTest, DropOutOfRangeIndices) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2}); AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102}); AddInputFromArray<int32>(TensorShape({2}), {5, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1})); test::FillValues<float>(&expected, {100, 0, 102, 0, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } class ScatterNdOpConstructionTest : public OpsTestBase {}; TEST_F(ScatterNdOpConstructionTest, Error_BadIndicesPolicyInvalid) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY") .Finalize(node_def())); EXPECT_NE(InitOp(), absl::OkStatus()); } class ScatterNdUpdateBM : public ScatterNdUpdateOpTest { public: void TestBody() override {} void MakeBenchmarkOp(const char* op, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", op) .Input(FakeInput(DT_FLOAT_REF)) .Input(FakeInput(index_type)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_CHECK_OK(InitOp()); } }; template <typename Index> void BM_ScatterNdHelper(::testing::benchmark::State& state, int embedding_size, const char* op) { const int kRows = 10000000 / embedding_size; std::vector<float> values; values.reserve(kRows); for (int i = 0; i < kRows * embedding_size; i++) { values.push_back(i); } const int kNumUpdates = 1000; random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); std::vector<Index> indices; std::vector<float> updates; for (int i = 0; i < kNumUpdates; i++) { indices.push_back(rnd.Uniform(kRows)); for (int j = 0; j < embedding_size; j++) { updates.push_back(i * 10 + j); } } ScatterNdUpdateBM bm; bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v()); 
bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values); bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices); bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}), updates); for (auto i : state) { Status s = bm.RunOpKernel(); } state.SetItemsProcessed((static_cast<int64_t>(kNumUpdates) * embedding_size) * state.iterations()); } void BM_ScatterNdUpdateInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdUpdate"); } void BM_ScatterNdUpdateInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdUpdate"); } void BM_ScatterNdAddInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdAdd"); } void BM_ScatterNdAddInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdAdd"); } BENCHMARK(BM_ScatterNdUpdateInt32) ->Arg(1) ->Arg(10) ->Arg(64) ->Arg(256) ->Arg(1024); BENCHMARK(BM_ScatterNdUpdateInt64) ->Arg(1) ->Arg(10) ->Arg(64) ->Arg(256) ->Arg(1024); BENCHMARK(BM_ScatterNdAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterNdAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_nd_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
02c13d34-4aaf-42ef-bae3-8c05db8365ff
cpp
tensorflow/tensorflow
lrn_op
tensorflow/core/kernels/lrn_op.cc
tensorflow/core/kernels/lrn_op_test.cc
#define EIGEN_USE_THREADS #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "xla/tsl/framework/contraction/eigen_contraction_kernel.h" #endif #if !defined(IS_MOBILE_PLATFORM) #include "tensorflow/core/util/work_sharder.h" #endif #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/gpu_utils.h" #if TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/conv_ops_gpu.h" #endif #include "tensorflow/core/platform/stream_executor.h" #include "tensorflow/core/util/stream_executor_util.h" #endif namespace tensorflow { namespace { const int kSingleThreadedLRNDepthCutoff = 384; template <typename T> void GetBandMatrix(int depth, int depth_radius, Eigen::Tensor<T, 2, Eigen::RowMajor>* result) { result->setZero(); for (int row = 0; row < depth; ++row) { const int begin = std::max<int>(0, row - depth_radius); const int end = std::min<int>(depth, row + depth_radius + 1); Eigen::DSizes<Eigen::DenseIndex, 2> start(row, begin); Eigen::DSizes<Eigen::DenseIndex, 2> sizes(1, end - begin); result->slice(start, sizes).setConstant(T(1)); } } } typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T> struct LaunchLRN; template <typename T> struct LaunchLRN<CPUDevice, T> { LaunchLRN(int depth_radius, T bias, T alpha, T beta) : depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {} void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in, Tensor* output) { const int batch = static_cast<int>(in.dim_size(0)); const int rows = 
static_cast<int>(in.dim_size(1)); const int cols = static_cast<int>(in.dim_size(2)); const int depth = static_cast<int>(in.dim_size(3)); #if defined(IS_MOBILE_PLATFORM) SingleThreadedLRN(in, batch, rows, cols, depth, output); #else if (depth > kSingleThreadedLRNDepthCutoff && (beta_ == T(0.5) || beta_ == T(1))) { SingleThreadedLRN(in, batch, rows, cols, depth, output); return; } const int nodes = cols * rows; auto in_shaped = in.shaped<T, 2>({nodes * batch, depth}); Eigen::Tensor<T, 2, Eigen::RowMajor> multiplier(depth, depth); GetBandMatrix<T>(depth, depth_radius_, &multiplier); auto out_shaped = output->shaped<T, 2>({nodes * batch, depth}); Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}}; auto tmp = in_shaped.square().contract(multiplier, dims) * alpha_ + bias_; if (beta_ == T(1)) { out_shaped.device(context->eigen_cpu_device()) = in_shaped * tmp.inverse(); } else if (beta_ == T(0.5)) { out_shaped.device(context->eigen_cpu_device()) = in_shaped * tmp.rsqrt(); } else { out_shaped.device(context->eigen_cpu_device()) = in_shaped * (tmp.log() * -beta_).exp(); } #endif } private: typedef typename Eigen::Tensor<T, 1, Eigen::RowMajor>::DimensionPair DimPair; void SingleThreadedLRN(const Tensor& in, const int batch, const int rows, const int cols, const int depth, Tensor* out) { Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> data_in( in.flat<T>().data(), depth, batch * rows * cols); Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> data_out( out->flat<T>().data(), depth, batch * rows * cols); const int double_depth_radius = depth_radius_ * 2; Eigen::Matrix<T, Eigen::Dynamic, 1> padded_square(data_in.rows() + double_depth_radius); padded_square.setZero(); for (int r = 0; r < data_in.cols(); ++r) { padded_square.block(depth_radius_, 0, data_out.rows(), 1) = data_in.col(r).cwiseProduct(data_in.col(r)) * alpha_; T accumulated_scale(0); for (int i = 0; i < double_depth_radius; ++i) { accumulated_scale += padded_square(i); } for (int i = 0; i < 
data_in.rows(); ++i) { accumulated_scale += padded_square(i + double_depth_radius); data_out(i, r) = bias_ + accumulated_scale; accumulated_scale -= padded_square(i); } } if (beta_ == T(1)) { data_out.array() = data_in.array() * data_out.array().inverse(); } else if (beta_ == T(0.5)) { data_out.array() = data_in.array() * data_out.array().rsqrt(); } else { data_out.array() = data_in.array() * (data_out.array().log() * -beta_).exp(); } } int depth_radius_; T bias_; T alpha_; T beta_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchLRN<GPUDevice, T> { LaunchLRN(int depth_radius, T bias, T alpha, T beta) : depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {} void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in, Tensor* output) { #if GOOGLE_CUDA OP_REQUIRES( context, beta_ >= 0.01, errors::InvalidArgument("cuDNN requires beta >= 0.01, got: ", beta_)); OP_REQUIRES( context, depth_radius_ > 0 && depth_radius_ <= 7, errors::InvalidArgument("cuDNN requires depth_radius in [1, 7], got: ", depth_radius_)); OP_REQUIRES( context, bias_ >= 1e-5, errors::InvalidArgument("cuDNN requires bias >= 1e-5, got: ", bias_)); const int batch = static_cast<int>(in.dim_size(0)); const int rows = static_cast<int>(in.dim_size(1)); const int cols = static_cast<int>(in.dim_size(2)); const int depth = static_cast<int>(in.dim_size(3)); se::dnn::BatchDescriptor dimensions_desc; dimensions_desc.set_count(batch) .set_height(rows) .set_width(cols) .set_feature_map_count(depth) .set_layout(se::dnn::DataLayout::kBatchYXDepth); se::dnn::NormalizeDescriptor normalize_desc; normalize_desc.set_bias(bias_) .set_range(depth_radius_) .set_alpha(alpha_) .set_beta(beta_); auto input_data = StreamExecutorUtil::AsDeviceMemory<T>(in); auto output_data = StreamExecutorUtil::AsDeviceMemory<T>(*output); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); auto dnn = 
stream->parent()->AsDnn(); OP_REQUIRES(context, dnn != nullptr, absl::InternalError("No DNN support for stream.")); bool status = dnn->DoNormalizeWithDimensions( stream, normalize_desc, dimensions_desc, input_data, &output_data); OP_REQUIRES(context, status, errors::Internal("NormalizeWithDimensions launch failed")); #elif TENSORFLOW_USE_ROCM const int batch = static_cast<int>(in.dim_size(0)); const int rows = static_cast<int>(in.dim_size(1)); const int cols = static_cast<int>(in.dim_size(2)); const int depth = static_cast<int>(in.dim_size(3)); Tensor transformed_input; TensorShape transformed_input_shape; OP_REQUIRES_OK( context, ShapeFromFormatWithStatus(FORMAT_NCHW, in.shape(), FORMAT_NHWC, &transformed_input_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_input_shape, &transformed_input)); functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(), in.tensor<T, 4>(), transformed_input.tensor<T, 4>()); Tensor transformed_output; TensorShape transformed_output_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, output->shape(), FORMAT_NHWC, &transformed_output_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_output_shape, &transformed_output)); stream_executor::dnn::BatchDescriptor dimensions_desc; dimensions_desc.set_count(batch) .set_height(rows) .set_width(cols) .set_feature_map_count(depth) .set_layout(stream_executor::dnn::DataLayout::kBatchDepthYX); stream_executor::dnn::NormalizeDescriptor normalize_desc; normalize_desc.set_bias(bias_) .set_range(depth_radius_) .set_alpha(alpha_) .set_beta(beta_); auto input_data = AsDeviceMemory(transformed_input.template flat<T>().data(), transformed_input.template flat<T>().size()); auto output_data = AsDeviceMemory(transformed_output.template flat<T>().data(), transformed_output.template flat<T>().size()); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, 
errors::Internal("No GPU stream available.")); auto dnn = stream->parent()->AsDnn(); OP_REQUIRES(context, dnn != nullptr, absl::InternalError("No DNN support for stream.")); bool status = dnn->DoNormalizeWithDimensions( stream, normalize_desc, dimensions_desc, input_data, &output_data); OP_REQUIRES(context, status, errors::Internal("NormalizeWithDimensions launch failed")); auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::NCHWToNHWC<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), toConstTensor(transformed_output).template tensor<T, 4>(), output->tensor<T, 4>()); #endif } int depth_radius_; T bias_; T alpha_; T beta_; }; #endif template <typename Device, typename T> class LRNOp : public OpKernel { public: explicit LRNOp(OpKernelConstruction* context) : OpKernel(context) { int64_t depth_radius64; OP_REQUIRES_OK(context, context->GetAttr("depth_radius", &depth_radius64)); OP_REQUIRES( context, FastBoundsCheck(depth_radius64, std::numeric_limits<int>::max()), errors::InvalidArgument("depth_radius = ", depth_radius64, " larger than int max")); depth_radius_ = static_cast<int>(depth_radius64); float tmp; OP_REQUIRES_OK(context, context->GetAttr("bias", &tmp)); bias_ = T(tmp); OP_REQUIRES_OK(context, context->GetAttr("alpha", &tmp)); alpha_ = T(tmp); OP_REQUIRES_OK(context, context->GetAttr("beta", &tmp)); beta_ = T(tmp); } void Compute(OpKernelContext* context) override { const Tensor& in = context->input(0); OP_REQUIRES(context, in.dims() == 4, errors::InvalidArgument("in must be 4-dimensional")); OP_REQUIRES( context, FastBoundsCheck(in.NumElements(), std::numeric_limits<int>::max()), errors::InvalidArgument("argument to LRN too large")); const int batch = static_cast<int>(in.dim_size(0)); const int rows = static_cast<int>(in.dim_size(1)); const int cols = static_cast<int>(in.dim_size(2)); const int depth = static_cast<int>(in.dim_size(3)); OP_REQUIRES(context, (depth + depth_radius_) <= std::numeric_limits<int>::max(), 
errors::InvalidArgument("depth ", depth, " + depth_radius ", depth_radius_, " exceeds int max.")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({batch, rows, cols, depth}), &output)); LaunchLRN<Device, T> launcher(depth_radius_, bias_, alpha_, beta_); launcher.launch(context, this, in, output); } private: int depth_radius_; T bias_; T alpha_; T beta_; }; #define REGISTER_CPU(T) \ REGISTER_KERNEL_BUILDER( \ Name("LRN").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ LRNOp<CPUDevice, T>); TF_CALL_float(REGISTER_CPU); TF_CALL_half(REGISTER_CPU); #undef REGISTER_CPU #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU(T) \ REGISTER_KERNEL_BUILDER( \ Name("LRN").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ LRNOp<GPUDevice, T>); TF_CALL_float(REGISTER_GPU); #undef REGISTER_GPU #endif #if !defined(IS_MOBILE_PLATFORM) template <typename Device, typename T> struct LaunchLRNGrad; template <typename T> struct LaunchLRNGrad<CPUDevice, T> { LaunchLRNGrad(int depth_radius, T bias, T alpha, T beta) : depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta), alpha_beta_2_(T(-2) * alpha * beta) {} void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in_grads, const Tensor& in_image, const Tensor& out_image, Tensor* output) { const int64_t batch = in_grads.dim_size(0); const int64_t rows = in_grads.dim_size(1); const int64_t cols = in_grads.dim_size(2); const int64_t depth = in_grads.dim_size(3); const auto nodes = cols * rows; auto grads_shaped = in_grads.shaped<T, 2>({nodes * batch, depth}); auto in_shaped = in_image.shaped<T, 2>({nodes * batch, depth}); auto activations = out_image.shaped<T, 2>({nodes * batch, depth}); auto out_shaped = output->shaped<T, 2>({nodes * batch, depth}); out_shaped.setZero(); auto shard = [this, activations, in_shaped, grads_shaped, out_shaped, depth](int64_t begin, int64_t end) { for (int64_t i = begin; i < end; ++i) { for (int64_t j = 0; j < depth; ++j) { T gs = 
grads_shaped(i, j); if (gs == T(0)) continue; int64_t depth_begin = std::max<int64_t>(0, j - depth_radius_); int64_t depth_end = std::min<int64_t>(depth, j + depth_radius_ + 1); T norm(0); for (int64_t k = depth_begin; k < depth_end; ++k) { norm += in_shaped(i, k) * in_shaped(i, k); } norm = alpha_ * norm + bias_; DCHECK_GT(norm, T(1e-6)); T pre_computed_pow = Eigen::numext::pow(norm, -beta_); T activations_ab2 = alpha_beta_2_ * activations(i, j); for (int64_t k = depth_begin; k < depth_end; ++k) { T dyi = in_shaped(i, k) * activations_ab2 / norm; if (k == j) { dyi += pre_computed_pow; } dyi *= gs; const_cast<typename TTypes<T, 2>::Tensor&>(out_shaped)(i, k) += dyi; } } } }; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, nodes * batch, depth * depth, shard); } int depth_radius_; T bias_; T alpha_; T beta_; T alpha_beta_2_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchLRNGrad<GPUDevice, T> { LaunchLRNGrad(int depth_radius, T bias, T alpha, T beta) : depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {} void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in_grads, const Tensor& in_image, const Tensor& out_image, Tensor* output) { #if GOOGLE_CUDA OP_REQUIRES( context, beta_ >= 0.01, errors::InvalidArgument("cuDNN requires beta >= 0.01, got: ", beta_)); OP_REQUIRES( context, depth_radius_ > 0 && depth_radius_ <= 7, errors::InvalidArgument("cuDNN requires depth_radius in [1, 7], got: ", depth_radius_)); OP_REQUIRES( context, bias_ >= 1e-5, errors::InvalidArgument("cuDNN requires bias >= 1e-5, got: ", bias_)); const int64_t batch = in_grads.dim_size(0); const int64_t rows = in_grads.dim_size(1); const int64_t cols = in_grads.dim_size(2); const int64_t depth = in_grads.dim_size(3); se::dnn::BatchDescriptor dimensions_desc; dimensions_desc.set_count(batch) .set_height(rows) .set_width(cols) .set_feature_map_count(depth) 
.set_layout(se::dnn::DataLayout::kBatchYXDepth); se::dnn::NormalizeDescriptor normalize_desc; normalize_desc.set_bias(bias_) .set_range(depth_radius_) .set_alpha(alpha_) .set_beta(beta_); auto input_grads_data = StreamExecutorUtil::AsDeviceMemory<T>(in_grads); auto input_image_data = StreamExecutorUtil::AsDeviceMemory<T>(in_image); auto output_image_data = StreamExecutorUtil::AsDeviceMemory<T>(out_image); auto output_grads_data = StreamExecutorUtil::AsDeviceMemory<T>(*output); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); auto dnn = stream->parent()->AsDnn(); OP_REQUIRES(context, dnn != nullptr, absl::InternalError("No DNN support for stream.")); bool status = dnn->DoNormalizeBackwardWithDimensions( stream, normalize_desc, dimensions_desc, input_image_data, output_image_data, input_grads_data, &output_grads_data, nullptr); OP_REQUIRES( context, status, errors::Internal("NormalizeBackwardWithDimensions launch failed")); #elif TENSORFLOW_USE_ROCM const int64 batch = in_grads.dim_size(0); const int64 rows = in_grads.dim_size(1); const int64 cols = in_grads.dim_size(2); const int64 depth = in_grads.dim_size(3); Tensor transformed_in_grads; TensorShape transformed_in_grads_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, in_grads.shape(), FORMAT_NHWC, &transformed_in_grads_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_in_grads_shape, &transformed_in_grads)); functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(), in_grads.tensor<T, 4>(), transformed_in_grads.tensor<T, 4>()); Tensor transformed_in_image; TensorShape transformed_in_image_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, in_image.shape(), FORMAT_NHWC, &transformed_in_image_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_in_image_shape, &transformed_in_image)); 
functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(), in_image.tensor<T, 4>(), transformed_in_image.tensor<T, 4>()); Tensor transformed_out_image; TensorShape transformed_out_image_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, out_image.shape(), FORMAT_NHWC, &transformed_out_image_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_out_image_shape, &transformed_out_image)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), out_image.tensor<T, 4>(), transformed_out_image.tensor<T, 4>()); Tensor transformed_output; TensorShape transformed_output_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, output->shape(), FORMAT_NHWC, &transformed_output_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_output_shape, &transformed_output)); stream_executor::dnn::BatchDescriptor dimensions_desc; dimensions_desc.set_count(batch) .set_height(rows) .set_width(cols) .set_feature_map_count(depth) .set_layout(stream_executor::dnn::DataLayout::kBatchDepthYX); stream_executor::dnn::NormalizeDescriptor normalize_desc; normalize_desc.set_bias(bias_) .set_range(depth_radius_) .set_alpha(alpha_) .set_beta(beta_); auto input_grads_data = AsDeviceMemory(transformed_in_grads.template flat<T>().data(), transformed_in_grads.template flat<T>().size()); auto input_image_data = AsDeviceMemory(transformed_in_image.template flat<T>().data(), transformed_in_image.template flat<T>().size()); auto output_image_data = AsDeviceMemory(transformed_out_image.template flat<T>().data(), transformed_out_image.template flat<T>().size()); auto output_grads_data = AsDeviceMemory(transformed_output.template flat<T>().data(), transformed_output.template flat<T>().size()); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); static int64 NormalizeBackwardScratchSize 
= GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32 ); DnnScratchAllocator scratch_allocator(NormalizeBackwardScratchSize, context); auto dnn = stream->parent()->AsDnn(); OP_REQUIRES(context, dnn != nullptr, absl::InternalError("No DNN support for stream.")); bool status = dnn->DoNormalizeBackwardWithDimensions( stream, normalize_desc, dimensions_desc, input_image_data, output_image_data, input_grads_data, &output_grads_data, nullptr, &scratch_allocator); OP_REQUIRES( context, status, errors::Internal("NormalizeBackwardWithDimensions launch failed")); auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::NCHWToNHWC<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), toConstTensor(transformed_output).template tensor<T, 4>(), output->tensor<T, 4>()); #endif } int depth_radius_; T bias_; T alpha_; T beta_; }; #endif template <typename Device, typename T> class LRNGradOp : public OpKernel { public: explicit LRNGradOp(OpKernelConstruction* context) : OpKernel(context) { int64_t depth_radius64; OP_REQUIRES_OK(context, context->GetAttr("depth_radius", &depth_radius64)); OP_REQUIRES( context, FastBoundsCheck(depth_radius64, std::numeric_limits<int>::max()), errors::InvalidArgument("depth_radius = ", depth_radius64, " larger than int max")); depth_radius_ = static_cast<int>(depth_radius64); float tmp; OP_REQUIRES_OK(context, context->GetAttr("bias", &tmp)); bias_ = T(tmp); OP_REQUIRES_OK(context, context->GetAttr("alpha", &tmp)); alpha_ = T(tmp); OP_REQUIRES_OK(context, context->GetAttr("beta", &tmp)); beta_ = T(tmp); } void Compute(OpKernelContext* context) override { const Tensor& in_grads = context->input(0); const Tensor& in_image = context->input(1); const Tensor& out_image = context->input(2); OP_REQUIRES(context, in_grads.dims() == 4 && in_image.dims() == 4, errors::InvalidArgument("inputs must be 4-dimensional")); const int64_t batch = in_grads.dim_size(0); const int64_t rows = in_grads.dim_size(1); const int64_t cols = 
in_grads.dim_size(2); const int64_t depth = in_grads.dim_size(3); OP_REQUIRES( context, in_image.dim_size(0) == batch && in_image.dim_size(1) == rows && in_image.dim_size(2) == cols && in_image.dim_size(3) == depth && out_image.dim_size(0) == batch && out_image.dim_size(1) == rows && out_image.dim_size(2) == cols && out_image.dim_size(3) == depth && out_image.dims() == 4, errors::InvalidArgument( "input_grads, input_image, and out_image should have the same " "shape")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({batch, rows, cols, depth}), &output)); LaunchLRNGrad<Device, T> launcher(depth_radius_, bias_, alpha_, beta_); launcher.launch(context, this, in_grads, in_image, out_image, output); } private: int depth_radius_; T bias_; T alpha_; T beta_; }; #define REGISTER_CPU(T) \ REGISTER_KERNEL_BUILDER( \ Name("LRNGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ LRNGradOp<CPUDevice, T>); TF_CALL_float(REGISTER_CPU); TF_CALL_half(REGISTER_CPU); #undef REGISTER_CPU #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU(T) \ REGISTER_KERNEL_BUILDER( \ Name("LRNGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ LRNGradOp<GPUDevice, T>); TF_CALL_float(REGISTER_GPU); #undef REGISTER_GPU #endif #endif }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static const float tol_ = 1e-4; class LRNFloatTest : public OpsTestBase { protected: LRNFloatTest() : philox_(123, 17), rand_(&philox_) {} int GetIntAttr(const string& name) { int value; TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value)); return value; } float GetFloatAttr(const string& name) { float value; TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value)); return value; } bool Compare() { const auto& input = GetInput(0); const int64_t batch_size = input.dim_size(0); const int64_t rows = input.dim_size(1); const int64_t cols = input.dim_size(2); const int64_t depth = input.dim_size(3); const int64_t rest = cols * rows * batch_size; const int64_t depth_radius = GetIntAttr("depth_radius"); const float bias = GetFloatAttr("bias"); const float alpha = GetFloatAttr("alpha"); const float beta = GetFloatAttr("beta"); Eigen::Tensor<float, 4, Eigen::RowMajor> expected(batch_size, rows, cols, depth); auto out = expected.reshape(Eigen::DSizes<Eigen::Index, 2>{rest, depth}); auto in = input.shaped<float, 2>({rest, depth}); for (int64_t i = 0; i < rest; ++i) { Eigen::Tensor<float, 1, Eigen::RowMajor> out_col(depth); for (int64_t d = 0; d < depth; ++d) { float 
denom = 0.0f; for (int64_t r = std::max(int64_t{0}, d - depth_radius); r < std::min(depth, d + depth_radius + 1); ++r) { denom += in(i, r) * in(i, r); } denom = std::pow(denom * alpha + bias, beta); out_col(d) = in(i, d) / denom; } out.chip<0>(i) = out_col; } auto actual = GetOutput(0)->tensor<float, 4>(); Eigen::Tensor<float, 0, Eigen::RowMajor> sum = ((expected - actual).abs() > actual.constant(tol_)) .select(actual.constant(1), actual.constant(0)) .sum(); return sum() == 0; } random::PhiloxRandom philox_; random::SimplePhilox rand_; }; TEST_F(LRNFloatTest, Depth96) { TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") .Input(FakeInput()) .Attr("depth_radius", 5) .Attr("bias", 1.0f) .Attr("alpha", 0.1f) .Attr("beta", 2.0f) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInput<float>(TensorShape({1, 1, 1, 96}), [](int i) -> float { return i + 1; }); TF_ASSERT_OK(RunOpKernel()); auto actual = GetOutput(0)->tensor<float, 4>(); EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_); EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_); EXPECT_NEAR(64. / (2272.1 * 2272.1), actual(0, 0, 0, 63), tol_); EXPECT_NEAR(65. / (2736.5 * 2736.5), actual(0, 0, 0, 64), tol_); EXPECT_NEAR(96. / (5248.1 * 5248.1), actual(0, 0, 0, 95), tol_); EXPECT_TRUE(Compare()); } TEST_F(LRNFloatTest, Depth16) { TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") .Input(FakeInput()) .Attr("depth_radius", 5) .Attr("bias", 1.0f) .Attr("alpha", 0.1f) .Attr("beta", 2.0f) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInput<float>(TensorShape({1, 1, 1, 16}), [](int i) -> float { return i + 1; }); TF_ASSERT_OK(RunOpKernel()); auto actual = GetOutput(0)->tensor<float, 4>(); EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_); EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_); EXPECT_NEAR(16. 
/ (112.1 * 112.1), actual(0, 0, 0, 15), tol_); EXPECT_TRUE(Compare()); } static double RndGaussian(random::SimplePhilox* rnd) { double x1, x2; double r; do { x1 = 2 * rnd->RandDouble() - 1; x2 = 2 * rnd->RandDouble() - 1; r = x1 * x1 + x2 * x2; } while (r == 0 || r >= 1.0); double w = sqrt(-2.0 * log(r) / r); return x1 * w; } #define TCASE(NAME, DEPTH, BATCH, DEPTH_RADIUS, BIAS, ALPHA, BETA) \ TEST_F(LRNFloatTest, NAME) { \ TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") \ .Input(FakeInput()) \ .Attr("depth_radius", (DEPTH_RADIUS)) \ .Attr("bias", (BIAS)) \ .Attr("alpha", ((ALPHA) / 10)) \ .Attr("beta", (BETA)) \ .Finalize(node_def())); \ TF_ASSERT_OK(InitOp()); \ AddInput<float>(TensorShape({BATCH, 1, 1, DEPTH}), \ [this](int i) -> float { return RndGaussian(&rand_); }); \ TF_ASSERT_OK(RunOpKernel()); \ EXPECT_TRUE(Compare()); \ } TCASE(T0, 4, 2, 2, 1.0f, 1.0f, 2.0f) TCASE(T1, 16, 1, 5, 1.0f, 1.0f, 2.0f) TCASE(T2, 16, 32, 2, 1.0f, 2.0f, 1.0f) TCASE(T3, 128, 4, 3, 2.0f, 1.0f, 1.0f) #undef TCASE static Graph* MakeRNGrad(int batches, int rows, int cols, int depth, int depth_radius) { Graph* g = new Graph(OpRegistry::Global()); Tensor grads(DT_FLOAT, TensorShape({batches, rows, cols, depth})); grads.flat<float>().setRandom(); Tensor in(DT_FLOAT, TensorShape({batches, rows, cols, depth})); in.flat<float>().setRandom(); Tensor out(DT_FLOAT, TensorShape({batches, rows, cols, depth})); Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("lrn_grad_op"), "LRNGrad") .Input(test::graph::Constant(g, grads)) .Input(test::graph::Constant(g, in)) .Input(test::graph::Constant(g, out)) .Attr("depth_radius", depth_radius) .Attr("bias", 1.0f) .Attr("alpha", 1.0f / 10) .Attr("beta", 2.0f) .Finalize(g, &ret)); return g; } #define BM_LRNGradDev(DEVICE, B, R, C, D, DR) \ static void BM_LRNGrad_##DEVICE##_##B##_##R##_##C##_##D##_##DR( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, MakeRNGrad(B, R, C, D, DR), \ false) \ .Run(state); \ 
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * B * R * \ C * D * DR * 4); \ } \ BENCHMARK(BM_LRNGrad_##DEVICE##_##B##_##R##_##C##_##D##_##DR) BM_LRNGradDev(cpu, 128, 12, 12, 64, 4); BM_LRNGradDev(cpu, 128, 56, 56, 64, 2); BM_LRNGradDev(cpu, 128, 27, 27, 192, 2); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/lrn_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/lrn_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f6c433cf-5674-44ec-907c-0487ea446d58
cpp
tensorflow/tensorflow
example_parsing_ops
tensorflow/core/kernels/example_parsing_ops.cc
tensorflow/core/kernels/example_parsing_ops_test.cc
#include <numeric> #include <unordered_set> #include <vector> #include "absl/base/call_once.h" #include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/feature.pb.h" #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/util/example_proto_fast_parsing.h" #include "tensorflow/core/util/example_proto_helper.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" #include "tensorflow/core/util/work_sharder.h" namespace tensorflow { namespace { constexpr char kParseExampleV2[] = "ParseExampleV2"; constexpr char kParseSequenceExampleV2[] = "ParseSequenceExampleV2"; } class ParseExampleOp : public OpKernel { public: explicit ParseExampleOp(OpKernelConstruction* ctx) : OpKernel(ctx), op_version_(ctx->def().op() == kParseExampleV2 ? 
2 : 1) { OP_REQUIRES_OK(ctx, attrs_.Init(ctx, op_version_)); } void Compute(OpKernelContext* ctx) override { const Tensor* names; const Tensor* serialized; std::vector<StringPiece> dense_keys_t; std::vector<StringPiece> sparse_keys_t; std::vector<StringPiece> ragged_keys_t; OpInputList dense_defaults; OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized)); OP_REQUIRES(ctx, serialized->NumElements() == 0 || serialized->data(), errors::InvalidArgument( "Serialized data must not be null if there are elements")); OP_REQUIRES_OK(ctx, ctx->input("names", &names)); if (op_version_ == 2) { OP_REQUIRES_OK(ctx, GetTensorKeys(ctx, "dense_keys", &dense_keys_t)); OP_REQUIRES_OK(ctx, GetTensorKeys(ctx, "sparse_keys", &sparse_keys_t)); OP_REQUIRES_OK(ctx, GetTensorKeys(ctx, "ragged_keys", &ragged_keys_t)); } else { OP_REQUIRES_OK(ctx, GetInputListKeys(ctx, "dense_keys", &dense_keys_t)); OP_REQUIRES_OK(ctx, GetInputListKeys(ctx, "sparse_keys", &sparse_keys_t)); } absl::call_once(flag_, [&dense_keys_t, &sparse_keys_t, &ragged_keys_t]() { metrics::RecordParseDenseFeature(dense_keys_t.size()); metrics::RecordParseSparseFeature(sparse_keys_t.size()); metrics::RecordParseRaggedFeature(ragged_keys_t.size()); }); OP_REQUIRES_OK(ctx, ctx->input_list("dense_defaults", &dense_defaults)); OP_REQUIRES_OK( ctx, CheckInputShapes(serialized, names, dense_defaults, dense_keys_t, sparse_keys_t, ragged_keys_t)); example::FastParseExampleConfig config = MakeConfig(dense_keys_t, sparse_keys_t, ragged_keys_t, dense_defaults); example::Result result; if (TensorShapeUtils::IsVector(serialized->shape())) { OP_REQUIRES_OK( ctx, ParseExampleVector(config, serialized, names, ctx, &result)); } else { OP_REQUIRES_OK(ctx, ParseExampleScalar(config, serialized, ctx, &result)); } OP_REQUIRES_OK(ctx, WriteOutput(result, ctx)); } protected: Status GetTensorKeys(OpKernelContext* ctx, StringPiece input_name, std::vector<StringPiece>* keys) const { const Tensor* key_t; TF_RETURN_IF_ERROR(ctx->input(input_name, 
&key_t)); keys->reserve(key_t->NumElements()); auto keys_flat = key_t->flat<tstring>(); for (int i = 0; i < keys_flat.size(); ++i) { keys->push_back(keys_flat(i)); } return absl::OkStatus(); } Status GetInputListKeys(OpKernelContext* ctx, StringPiece input_name, std::vector<StringPiece>* keys) const { OpInputList key_list; TF_RETURN_IF_ERROR(ctx->input_list(input_name, &key_list)); keys->reserve(key_list.size()); for (const auto& key : key_list) { keys->push_back(key.scalar<tstring>()()); } return absl::OkStatus(); } Status CheckInputShapes(const Tensor* serialized, const Tensor* names, const OpInputList& dense_defaults, const std::vector<StringPiece>& dense_keys_t, const std::vector<StringPiece>& sparse_keys_t, const std::vector<StringPiece>& ragged_keys_t) const { if (op_version_ == 2) { if (TensorShapeUtils::IsMatrixOrHigher(serialized->shape())) { return errors::InvalidArgument( "Expected serialized to be a scalar or vector, got shape: ", serialized->shape().DebugString()); } } else { if (!TensorShapeUtils::IsVector(serialized->shape())) { return errors::InvalidArgument( "Expected serialized to be a vector, got shape: ", serialized->shape().DebugString()); } } if (names->NumElements() > 0 && names->shape() != serialized->shape()) { return errors::InvalidArgument( "Expected names have the same shape as serialized: name.shape=", names->shape().DebugString(), ", serialized.shape=", serialized->shape().DebugString()); } if (op_version_ == 2) { if (dense_keys_t.size() != attrs_.num_dense) { return errors::InvalidArgument( "Expected len(dense_keys) == len(dense_types) but got: ", dense_keys_t.size(), " vs. ", attrs_.num_dense); } if (sparse_keys_t.size() != attrs_.num_sparse) { return errors::InvalidArgument( "Expected len(sparse_keys) == num_sparse but got: ", sparse_keys_t.size(), " vs. 
", attrs_.num_sparse); } if (ragged_keys_t.size() != attrs_.num_ragged) { return errors::InvalidArgument( "Expected len(ragged_keys) == len(ragged_value_types) but got: ", ragged_keys_t.size(), " vs. ", attrs_.num_ragged); } } if (dense_defaults.size() != attrs_.num_dense) { return errors::InvalidArgument( "Expected len(dense_defaults) == len(dense_keys) but got: ", dense_defaults.size(), " vs. ", attrs_.num_dense); } for (int d = 0; d < static_cast<int>(attrs_.num_dense); ++d) { const Tensor& def_value = dense_defaults[d]; if (attrs_.variable_length[d]) { if (def_value.NumElements() != 1) { return errors::InvalidArgument( "dense_shape[", d, "] is a variable length shape: ", attrs_.dense_shapes[d].DebugString(), ", therefore " "def_value[", d, "] must contain a single element (" "the padding element). But its shape is: ", def_value.shape().DebugString()); } } else if (def_value.NumElements() > 0) { if (!attrs_.dense_shapes[d].IsCompatibleWith(def_value.shape())) { return errors::InvalidArgument( "def_value[", d, "].shape() == ", def_value.shape().DebugString(), " is not compatible with dense_shapes_[", d, "] == ", attrs_.dense_shapes[d].DebugString()); } } if (def_value.dtype() != attrs_.dense_types[d]) { return errors::InvalidArgument( "dense_defaults[", d, "].dtype() == ", DataTypeString(def_value.dtype()), " != dense_types_[", d, "] == ", DataTypeString(attrs_.dense_types[d])); } } return absl::OkStatus(); } example::FastParseExampleConfig MakeConfig( const std::vector<StringPiece>& dense_keys_t, const std::vector<StringPiece>& sparse_keys_t, const std::vector<StringPiece>& ragged_keys_t, const OpInputList& dense_defaults) const { example::FastParseExampleConfig config; config.dense.reserve(attrs_.num_dense); for (int d = 0; d < attrs_.num_dense; ++d) { config.dense.emplace_back(dense_keys_t[d], attrs_.dense_types[d], attrs_.dense_shapes[d], dense_defaults[d], attrs_.variable_length[d], attrs_.elements_per_stride[d]); } config.sparse.reserve(attrs_.num_sparse); 
for (int d = 0; d < attrs_.num_sparse; ++d) { config.sparse.emplace_back(sparse_keys_t[d], attrs_.sparse_types[d]); } config.ragged.reserve(attrs_.num_ragged); for (int d = 0; d < attrs_.num_ragged; ++d) { config.ragged.emplace_back(ragged_keys_t[d], attrs_.ragged_value_types[d], attrs_.ragged_split_types[d]); } return config; } Status ParseExampleScalar(const example::FastParseExampleConfig& config, const Tensor* serialized, OpKernelContext* ctx, example::Result* result) const { const tstring& serialized_proto = serialized->scalar<tstring>()(); return FastParseSingleExample(config, serialized_proto, result); } Status ParseExampleVector(const example::FastParseExampleConfig& config, const Tensor* serialized, const Tensor* names, OpKernelContext* ctx, example::Result* result) const { auto serialized_t = serialized->flat<tstring>(); auto names_t = names->flat<tstring>(); absl::Span<const tstring> slice(serialized_t.data(), serialized_t.size()); absl::Span<const tstring> names_slice(names_t.data(), names_t.size()); return FastParseExample( config, slice, names_slice, ctx->device()->tensorflow_cpu_worker_threads()->workers, result); } Status WriteOutput(const example::Result& result, OpKernelContext* ctx) const { OpOutputList dense_values; OpOutputList sparse_indices; OpOutputList sparse_values; OpOutputList sparse_shapes; TF_RETURN_IF_ERROR(ctx->output_list("dense_values", &dense_values)); TF_RETURN_IF_ERROR(ctx->output_list("sparse_indices", &sparse_indices)); TF_RETURN_IF_ERROR(ctx->output_list("sparse_values", &sparse_values)); TF_RETURN_IF_ERROR(ctx->output_list("sparse_shapes", &sparse_shapes)); for (int d = 0; d < attrs_.num_dense; ++d) { dense_values.set(d, result.dense_values[d]); } for (int d = 0; d < attrs_.num_sparse; ++d) { sparse_indices.set(d, result.sparse_indices[d]); sparse_values.set(d, result.sparse_values[d]); sparse_shapes.set(d, result.sparse_shapes[d]); } if (op_version_ == 2) { OpOutputList ragged_values; OpOutputList ragged_splits; 
TF_RETURN_IF_ERROR(ctx->output_list("ragged_values", &ragged_values)); TF_RETURN_IF_ERROR(ctx->output_list("ragged_row_splits", &ragged_splits)); for (int d = 0; d < attrs_.num_ragged; ++d) { ragged_values.set(d, result.ragged_values[d]); ragged_splits.set(d, result.ragged_splits[d]); } } return absl::OkStatus(); } ParseExampleAttrs attrs_; int op_version_; absl::once_flag flag_; }; REGISTER_KERNEL_BUILDER(Name("ParseExample").Device(DEVICE_CPU), ParseExampleOp); REGISTER_KERNEL_BUILDER(Name("ParseExampleV2").Device(DEVICE_CPU), ParseExampleOp); class ParseSingleExampleOp : public OpKernel { public: explicit ParseSingleExampleOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, attrs_.Init(ctx)); metrics::RecordParseDenseFeature(attrs_.dense_keys.size()); metrics::RecordParseSparseFeature(attrs_.sparse_keys.size()); } void Compute(OpKernelContext* ctx) override { const Tensor* serialized; OpInputList dense_defaults; OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized)); OP_REQUIRES_OK(ctx, ctx->input_list("dense_defaults", &dense_defaults)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(serialized->shape()), errors::InvalidArgument( "Expected serialized to be a scalar, got shape: ", serialized->shape().DebugString())); OP_REQUIRES(ctx, dense_defaults.size() == attrs_.dense_keys.size(), errors::InvalidArgument( "Expected len(dense_defaults) == len(dense_keys) but got: ", dense_defaults.size(), " vs. ", attrs_.dense_keys.size())); for (size_t d = 0; d < attrs_.dense_keys.size(); ++d) { const Tensor& def_value = dense_defaults[d]; if (attrs_.variable_length[d]) { OP_REQUIRES(ctx, def_value.NumElements() == 1, errors::InvalidArgument( "dense_shape[", d, "] is a variable length shape: ", attrs_.dense_shapes[d].DebugString(), ", therefore " "def_value[", d, "] must contain a single element (" "the padding element). 
But its shape is: ", def_value.shape().DebugString())); } else if (def_value.NumElements() > 0) { OP_REQUIRES(ctx, attrs_.dense_shapes[d].IsCompatibleWith(def_value.shape()), errors::InvalidArgument( "def_value[", d, "].shape() == ", def_value.shape().DebugString(), " is not compatible with dense_shapes_[", d, "] == ", attrs_.dense_shapes[d].DebugString())); } OP_REQUIRES(ctx, def_value.dtype() == attrs_.dense_types[d], errors::InvalidArgument( "dense_defaults[", d, "].dtype() == ", DataTypeString(def_value.dtype()), " != dense_types_[", d, "] == ", DataTypeString(attrs_.dense_types[d]))); } example::Result result; example::FastParseExampleConfig config; for (int d = 0; d < attrs_.dense_keys.size(); ++d) { config.dense.push_back({attrs_.dense_keys[d], attrs_.dense_types[d], attrs_.dense_shapes[d], dense_defaults[d], attrs_.variable_length[d], attrs_.elements_per_stride[d]}); } for (int d = 0; d < attrs_.sparse_keys.size(); ++d) { config.sparse.push_back({attrs_.sparse_keys[d], attrs_.sparse_types[d]}); } const tstring& serialized_proto = serialized->scalar<tstring>()(); OP_REQUIRES_OK(ctx, FastParseSingleExample(config, serialized_proto, &result)); OpOutputList dense_values; OpOutputList sparse_indices; OpOutputList sparse_values; OpOutputList sparse_shapes; OP_REQUIRES_OK(ctx, ctx->output_list("dense_values", &dense_values)); OP_REQUIRES_OK(ctx, ctx->output_list("sparse_indices", &sparse_indices)); OP_REQUIRES_OK(ctx, ctx->output_list("sparse_values", &sparse_values)); OP_REQUIRES_OK(ctx, ctx->output_list("sparse_shapes", &sparse_shapes)); for (int d = 0; d < attrs_.dense_keys.size(); ++d) { dense_values.set(d, result.dense_values[d]); } for (int d = 0; d < attrs_.sparse_keys.size(); ++d) { sparse_indices.set(d, result.sparse_indices[d]); sparse_values.set(d, result.sparse_values[d]); sparse_shapes.set(d, result.sparse_shapes[d]); } } protected: ParseSingleExampleAttrs attrs_; }; REGISTER_KERNEL_BUILDER(Name("ParseSingleExample").Device(DEVICE_CPU), 
ParseSingleExampleOp); class ParseSequenceExampleOp : public OpKernel { public: explicit ParseSequenceExampleOp(OpKernelConstruction* ctx) : OpKernel(ctx), op_version_(ctx->def().op() == kParseSequenceExampleV2 ? 2 : 1) { OP_REQUIRES_OK(ctx, attrs_.Init(ctx, op_version_)); metrics::RecordParseDenseFeature(attrs_.context_dense_keys.size() + attrs_.feature_list_dense_keys.size()); metrics::RecordParseSparseFeature(attrs_.context_sparse_keys.size() + attrs_.feature_list_sparse_keys.size()); } void Compute(OpKernelContext* ctx) override { const Tensor* debug_name; const Tensor* serialized; OpInputList context_dense_defaults; OP_REQUIRES_OK(ctx, ctx->input("debug_name", &debug_name)); OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized)); OP_REQUIRES_OK(ctx, ctx->input_list("context_dense_defaults", &context_dense_defaults)); const Tensor* context_dense_keys = nullptr; const Tensor* context_sparse_keys = nullptr; const Tensor* context_ragged_keys = nullptr; const Tensor* feature_list_dense_keys = nullptr; const Tensor* feature_list_sparse_keys = nullptr; const Tensor* feature_list_ragged_keys = nullptr; const Tensor* feature_list_dense_missing_assumed_empty = nullptr; if (op_version_ == 2) { OP_REQUIRES_OK(ctx, ctx->input("feature_list_dense_missing_assumed_empty", &feature_list_dense_missing_assumed_empty)); OP_REQUIRES_OK(ctx, ctx->input("context_dense_keys", &context_dense_keys)); OP_REQUIRES_OK(ctx, ctx->input("context_sparse_keys", &context_sparse_keys)); OP_REQUIRES_OK(ctx, ctx->input("context_ragged_keys", &context_ragged_keys)); OP_REQUIRES_OK( ctx, ctx->input("feature_list_dense_keys", &feature_list_dense_keys)); OP_REQUIRES_OK(ctx, ctx->input("feature_list_sparse_keys", &feature_list_sparse_keys)); OP_REQUIRES_OK(ctx, ctx->input("feature_list_ragged_keys", &feature_list_ragged_keys)); absl::call_once(flag_, [&]() { metrics::RecordParseDenseFeature( context_dense_keys->NumElements() + feature_list_dense_keys->NumElements()); 
metrics::RecordParseSparseFeature( context_sparse_keys->NumElements() + feature_list_sparse_keys->NumElements()); metrics::RecordParseRaggedFeature( context_ragged_keys->NumElements() + feature_list_ragged_keys->NumElements()); }); } OP_REQUIRES_OK(ctx, CheckInputShapes( serialized, debug_name, context_dense_defaults, context_dense_keys, context_sparse_keys, context_ragged_keys, feature_list_dense_keys, feature_list_sparse_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty)); example::FastParseExampleConfig context_config = MakeContextConfig(context_dense_keys, context_sparse_keys, context_ragged_keys, context_dense_defaults); example::FastParseExampleConfig feature_list_config = MakeFeatureListConfig( feature_list_dense_keys, feature_list_sparse_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty); bool is_batch = TensorShapeUtils::IsVector(serialized->shape()); auto serialized_t = serialized->flat<tstring>(); auto debug_name_t = debug_name->flat<tstring>(); absl::Span<const tstring> slice(serialized_t.data(), serialized_t.size()); absl::Span<const tstring> names_slice(debug_name_t.data(), debug_name_t.size()); example::Result context_result, feature_list_result; std::vector<Tensor> dense_feature_lengths; OP_REQUIRES_OK( ctx, FastParseSequenceExample( context_config, feature_list_config, slice, names_slice, ctx->device()->tensorflow_cpu_worker_threads()->workers, &context_result, &feature_list_result, &dense_feature_lengths, is_batch)); OP_REQUIRES_OK(ctx, WriteOutput(context_result, feature_list_result, dense_feature_lengths, ctx)); } protected: Status CheckInputShapes( const Tensor* serialized, const Tensor* names, const OpInputList& context_dense_defaults, const Tensor* context_dense_keys, const Tensor* context_sparse_keys, const Tensor* context_ragged_keys, const Tensor* feature_list_dense_keys, const Tensor* feature_list_sparse_keys, const Tensor* feature_list_ragged_keys, const Tensor* 
feature_list_dense_missing_assumed_empty) const { if (TensorShapeUtils::IsMatrixOrHigher(serialized->shape())) { return errors::InvalidArgument( "Expected serialized to be a scalar or vector, got shape: ", serialized->shape().DebugString()); } if (op_version_ > 1) { if (context_dense_keys->NumElements() != attrs_.num_context_dense) { return errors::InvalidArgument( "Expected len(context_dense_keys) to match len(Tcontext_dense)"); } if (context_sparse_keys->NumElements() != attrs_.num_context_sparse) { return errors::InvalidArgument( "Expected len(context_sparse_keys) to match Ncontext_sparse"); } if (context_ragged_keys->NumElements() != attrs_.num_context_ragged) { return errors::InvalidArgument( "Expected len(context_ragged_keys) to match " "len(context_ragged_value_types)"); } if (feature_list_dense_keys->NumElements() != attrs_.num_feature_list_dense) { return errors::InvalidArgument( "Expected len(feature_list_dense_keys) to match " "Nfeature_list_dense"); } if (feature_list_dense_missing_assumed_empty->NumElements() != attrs_.num_feature_list_dense) { return errors::InvalidArgument( "Expected len(feature_list_dense_missing_assumed_empty to match " "Nfeature_list_dense"); } if (feature_list_sparse_keys->NumElements() != attrs_.num_feature_list_sparse) { return errors::InvalidArgument( "Expected len(feature_list_sparse_keys) to match " "Nfeature_list_sparse"); } if (feature_list_ragged_keys->NumElements() != attrs_.num_feature_list_ragged) { return errors::InvalidArgument( "Expected len(feature_list_ragged_keys) to match " "len(feature_list_ragged_value_types)"); } } if (context_dense_defaults.size() != attrs_.num_context_dense) { return errors::InvalidArgument( "Expected len(context_dense_defaults) " "== len(context_dense_keys) but got: ", context_dense_defaults.size(), " vs. 
", attrs_.num_context_dense); } for (int d = 0; d < attrs_.num_context_dense; ++d) { const Tensor& def_value = context_dense_defaults[d]; if (def_value.NumElements() > 0) { if (def_value.shape() != attrs_.context_dense_shapes[d]) { return errors::InvalidArgument( "default_value[", d, "].shape() == ", def_value.shape().DebugString(), " != context_dense_shapes[", d, "] == ", attrs_.context_dense_shapes[d].DebugString()); } if (def_value.dtype() != attrs_.context_dense_types[d]) { return errors::InvalidArgument( "context_dense_defaults[", d, "].dtype() == ", DataTypeString(def_value.dtype()), " != context_dense_types[", d, "] == ", DataTypeString(attrs_.context_dense_types[d])); } } } return absl::OkStatus(); } example::FastParseExampleConfig MakeContextConfig( const Tensor* dense_keys, const Tensor* sparse_keys, const Tensor* ragged_keys, const OpInputList& context_dense_defaults) const { absl::Span<const tstring> dense_keys_slice = dense_keys ? absl::Span<const tstring>(dense_keys->flat<tstring>().data(), attrs_.num_context_dense) : attrs_.context_dense_keys; absl::Span<const tstring> sparse_keys_slice = sparse_keys ? absl::Span<const tstring>(sparse_keys->flat<tstring>().data(), attrs_.num_context_sparse) : attrs_.context_sparse_keys; absl::Span<const tstring> ragged_keys_slice = ragged_keys ? 
absl::Span<const tstring>(ragged_keys->flat<tstring>().data(), attrs_.num_context_ragged) : absl::Span<const tstring>(nullptr, 0); example::FastParseExampleConfig config; config.dense.reserve(attrs_.num_context_dense); for (int d = 0; d < attrs_.num_context_dense; ++d) { const tstring& key = dense_keys_slice[d]; config.dense.emplace_back(key, attrs_.context_dense_types[d], attrs_.context_dense_shapes[d], context_dense_defaults[d], false , 0 ); } config.sparse.reserve(attrs_.num_context_sparse); for (int d = 0; d < attrs_.num_context_sparse; ++d) { const tstring& key = sparse_keys_slice[d]; config.sparse.emplace_back(key, attrs_.context_sparse_types[d]); } config.ragged.reserve(attrs_.num_context_ragged); for (int d = 0; d < attrs_.num_context_ragged; ++d) { config.ragged.emplace_back(ragged_keys_slice[d], attrs_.context_ragged_value_types[d], attrs_.context_ragged_split_types[d]); } return config; } static Tensor ConstructDefaultScalar(DataType dtype) { switch (dtype) { case DT_INT64: return Tensor(static_cast<int64_t>(0)); case DT_FLOAT: return Tensor(static_cast<float>(0.0)); case DT_STRING: return Tensor(""); default: return Tensor(DT_INVALID); } } example::FastParseExampleConfig MakeFeatureListConfig( const Tensor* dense_keys, const Tensor* sparse_keys, const Tensor* ragged_keys, const Tensor* feature_list_dense_missing_assumed_empty) const { absl::Span<const tstring> dense_keys_slice = dense_keys ? absl::Span<const tstring>(dense_keys->flat<tstring>().data(), attrs_.num_feature_list_dense) : attrs_.feature_list_dense_keys; absl::Span<const tstring> sparse_keys_slice = sparse_keys ? absl::Span<const tstring>(sparse_keys->flat<tstring>().data(), attrs_.num_feature_list_sparse) : attrs_.feature_list_sparse_keys; absl::Span<const tstring> ragged_keys_slice = ragged_keys ? 
absl::Span<const tstring>(ragged_keys->flat<tstring>().data(), attrs_.num_feature_list_ragged) : absl::Span<const tstring>(nullptr, 0); absl::Span<const bool> feature_list_dense_missing_assumed_empty_slice = feature_list_dense_missing_assumed_empty ? absl::Span<const bool>( feature_list_dense_missing_assumed_empty->flat<bool>().data(), attrs_.num_feature_list_dense) : absl::Span<const bool>(nullptr, 0); example::FastParseExampleConfig config; config.dense.reserve(attrs_.num_feature_list_dense); for (int d = 0; d < attrs_.num_feature_list_dense; ++d) { const tstring& key = dense_keys_slice[d]; bool missing_assumed_empty = !feature_list_dense_missing_assumed_empty_slice.empty() ? feature_list_dense_missing_assumed_empty_slice[d] : attrs_.feature_list_dense_missing_assumed_empty.count(key) > 0; DataType dtype = attrs_.feature_list_dense_types[d]; config.dense.emplace_back( key, dtype, attrs_.feature_list_dense_shapes[d], ConstructDefaultScalar(dtype), missing_assumed_empty, 0 ); } config.sparse.reserve(attrs_.num_feature_list_sparse); for (int d = 0; d < attrs_.num_feature_list_sparse; ++d) { const tstring& key = sparse_keys_slice[d]; config.sparse.emplace_back(key, attrs_.feature_list_sparse_types[d]); } config.ragged.reserve(attrs_.num_feature_list_ragged); for (int d = 0; d < attrs_.num_feature_list_ragged; ++d) { config.ragged.emplace_back(ragged_keys_slice[d], attrs_.feature_list_ragged_value_types[d], attrs_.feature_list_ragged_split_types[d]); } return config; } Status WriteOutput(const example::Result& context_result, const example::Result& feature_list_result, const std::vector<Tensor>& dense_feature_lengths, OpKernelContext* ctx) const { OpOutputList context_sparse_indices; OpOutputList context_sparse_values; OpOutputList context_sparse_shapes; OpOutputList context_dense_values; OpOutputList feature_list_sparse_indices; OpOutputList feature_list_sparse_values; OpOutputList feature_list_sparse_shapes; OpOutputList feature_list_dense_values; OpOutputList 
feature_list_dense_lengths; TF_RETURN_IF_ERROR( ctx->output_list("context_sparse_indices", &context_sparse_indices)); TF_RETURN_IF_ERROR( ctx->output_list("context_sparse_values", &context_sparse_values)); TF_RETURN_IF_ERROR( ctx->output_list("context_sparse_shapes", &context_sparse_shapes)); TF_RETURN_IF_ERROR( ctx->output_list("context_dense_values", &context_dense_values)); TF_RETURN_IF_ERROR( ctx->output_list("context_sparse_indices", &context_sparse_indices)); TF_RETURN_IF_ERROR(ctx->output_list("feature_list_sparse_indices", &feature_list_sparse_indices)); TF_RETURN_IF_ERROR(ctx->output_list("feature_list_sparse_values", &feature_list_sparse_values)); TF_RETURN_IF_ERROR(ctx->output_list("feature_list_sparse_shapes", &feature_list_sparse_shapes)); TF_RETURN_IF_ERROR(ctx->output_list("feature_list_dense_values", &feature_list_dense_values)); TF_RETURN_IF_ERROR(ctx->output_list("feature_list_dense_lengths", &feature_list_dense_lengths)); for (int d = 0; d < attrs_.num_context_dense; ++d) { context_dense_values.set(d, context_result.dense_values[d]); } for (int d = 0; d < attrs_.num_feature_list_dense; ++d) { feature_list_dense_values.set(d, feature_list_result.dense_values[d]); feature_list_dense_lengths.set(d, dense_feature_lengths[d]); } for (int d = 0; d < attrs_.num_context_sparse; ++d) { context_sparse_indices.set(d, context_result.sparse_indices[d]); context_sparse_values.set(d, context_result.sparse_values[d]); context_sparse_shapes.set(d, context_result.sparse_shapes[d]); } for (int d = 0; d < attrs_.num_feature_list_sparse; ++d) { feature_list_sparse_indices.set(d, feature_list_result.sparse_indices[d]); feature_list_sparse_values.set(d, feature_list_result.sparse_values[d]); feature_list_sparse_shapes.set(d, feature_list_result.sparse_shapes[d]); } if (op_version_ == 2) { OpOutputList context_ragged_values; OpOutputList context_ragged_splits; OpOutputList feature_list_ragged_values; OpOutputList feature_list_ragged_inner_splits; OpOutputList 
feature_list_ragged_outer_splits;
    TF_RETURN_IF_ERROR(
        ctx->output_list("context_ragged_values", &context_ragged_values));
    TF_RETURN_IF_ERROR(ctx->output_list("context_ragged_row_splits",
                                        &context_ragged_splits));
    TF_RETURN_IF_ERROR(ctx->output_list("feature_list_ragged_values",
                                        &feature_list_ragged_values));
    TF_RETURN_IF_ERROR(ctx->output_list("feature_list_ragged_inner_splits",
                                        &feature_list_ragged_inner_splits));
    TF_RETURN_IF_ERROR(ctx->output_list("feature_list_ragged_outer_splits",
                                        &feature_list_ragged_outer_splits));
    for (int d = 0; d < attrs_.num_context_ragged; ++d) {
      context_ragged_values.set(d, context_result.ragged_values[d]);
      context_ragged_splits.set(d, context_result.ragged_splits[d]);
    }
    for (int d = 0; d < attrs_.num_feature_list_ragged; ++d) {
      feature_list_ragged_values.set(d,
                                     feature_list_result.ragged_values[d]);
      feature_list_ragged_outer_splits.set(
          d, feature_list_result.ragged_outer_splits[d]);
      // NOTE: the parser's `ragged_splits` are the inner splits here.
      feature_list_ragged_inner_splits.set(
          d, feature_list_result.ragged_splits[d]);
    }
  }
  return absl::OkStatus();
}

ParseSequenceExampleAttrs attrs_;
int op_version_;
// Guards the one-time metrics recording in Compute().
absl::once_flag flag_;
};

REGISTER_KERNEL_BUILDER(Name("ParseSequenceExample").Device(DEVICE_CPU),
                        ParseSequenceExampleOp);
REGISTER_KERNEL_BUILDER(Name("ParseSequenceExampleV2").Device(DEVICE_CPU),
                        ParseSequenceExampleOp);

// Parses exactly one serialized SequenceExample (scalar input), walking the
// proto directly rather than using the batched fast parser.
class ParseSingleSequenceExampleOp : public OpKernel {
 public:
  explicit ParseSingleSequenceExampleOp(OpKernelConstruction* ctx)
      : OpKernel(ctx) {
    OP_REQUIRES_OK(ctx, attrs_.Init(ctx));
  }

  void Compute(OpKernelContext* ctx) override {
    const Tensor* debug_name;
    const Tensor* serialized;
    OpInputList context_dense_keys;
    OpInputList context_sparse_keys;
    OpInputList context_dense_defaults;
    OpInputList feature_list_dense_keys;
    OpInputList feature_list_sparse_keys;
    const Tensor* feature_list_dense_missing_assumed_empty;
    OP_REQUIRES_OK(ctx, ctx->input("debug_name", &debug_name));
    OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized));
    OP_REQUIRES_OK(ctx,
ctx->input("feature_list_dense_missing_assumed_empty",
           &feature_list_dense_missing_assumed_empty));
    OP_REQUIRES_OK(ctx,
                   ctx->input_list("context_dense_keys", &context_dense_keys));
    OP_REQUIRES_OK(ctx, ctx->input_list("feature_list_dense_keys",
                                        &feature_list_dense_keys));
    OP_REQUIRES_OK(
        ctx, ctx->input_list("context_sparse_keys", &context_sparse_keys));
    OP_REQUIRES_OK(ctx, ctx->input_list("feature_list_sparse_keys",
                                        &feature_list_sparse_keys));
    OP_REQUIRES_OK(ctx, ctx->input_list("context_dense_defaults",
                                        &context_dense_defaults));
    // Keys are materialized into plain vectors for fast repeated lookup.
    std::vector<string> context_dense_keys_t(attrs_.num_context_dense);
    std::vector<string> context_sparse_keys_t(attrs_.num_context_sparse);
    std::vector<string> feature_list_dense_keys_t(
        attrs_.num_feature_list_dense);
    std::vector<string> feature_list_sparse_keys_t(
        attrs_.num_feature_list_sparse);
    // Record feature-count metrics once per kernel instance.
    absl::call_once(
        flag_, [&context_dense_keys_t, &context_sparse_keys_t,
                &feature_list_dense_keys_t, &feature_list_sparse_keys_t]() {
          metrics::RecordParseDenseFeature(context_dense_keys_t.size() +
                                           feature_list_dense_keys_t.size());
          metrics::RecordParseSparseFeature(
              context_sparse_keys_t.size() +
              feature_list_sparse_keys_t.size());
        });
    std::unordered_set<string> feature_list_dense_missing_assumed_empty_set;
    CHECK_EQ(context_dense_keys.size(), attrs_.num_context_dense);
    CHECK_EQ(context_sparse_keys.size(), attrs_.num_context_sparse);
    CHECK_EQ(feature_list_dense_keys.size(), attrs_.num_feature_list_dense);
    CHECK_EQ(feature_list_sparse_keys.size(), attrs_.num_feature_list_sparse);
    // Every key input must be a scalar string tensor.
    for (int di = 0; di < attrs_.num_context_dense; ++di) {
      OP_REQUIRES(ctx,
                  TensorShapeUtils::IsScalar(context_dense_keys[di].shape()),
                  errors::InvalidArgument(
                      "Expected context_dense_keys[", di,
                      "] to be a scalar, got shape: ",
                      context_dense_keys[di].shape().DebugString()));
      context_dense_keys_t[di] = context_dense_keys[di].scalar<tstring>()();
    }
    for (int di = 0; di < attrs_.num_context_sparse; ++di) {
      OP_REQUIRES(ctx,
TensorShapeUtils::IsScalar(context_sparse_keys[di].shape()),
                  errors::InvalidArgument(
                      "Expected context_sparse_keys[", di,
                      "] to be a scalar, got shape: ",
                      context_sparse_keys[di].shape().DebugString()));
      context_sparse_keys_t[di] = context_sparse_keys[di].scalar<tstring>()();
    }
    for (int di = 0; di < attrs_.num_feature_list_dense; ++di) {
      OP_REQUIRES(
          ctx, TensorShapeUtils::IsScalar(feature_list_dense_keys[di].shape()),
          errors::InvalidArgument(
              "Expected feature_list_dense_keys[", di,
              "] to be a scalar, got shape: ",
              feature_list_dense_keys[di].shape().DebugString()));
      feature_list_dense_keys_t[di] =
          feature_list_dense_keys[di].scalar<tstring>()();
    }
    for (int di = 0; di < attrs_.num_feature_list_sparse; ++di) {
      OP_REQUIRES(
          ctx,
          TensorShapeUtils::IsScalar(feature_list_sparse_keys[di].shape()),
          errors::InvalidArgument(
              "Expected feature_list_sparse_keys[", di,
              "] to be a scalar, got shape: ",
              feature_list_sparse_keys[di].shape().DebugString()));
      feature_list_sparse_keys_t[di] =
          feature_list_sparse_keys[di].scalar<tstring>()();
    }
    OP_REQUIRES(
        ctx,
        TensorShapeUtils::IsVector(
            feature_list_dense_missing_assumed_empty->shape()),
        errors::InvalidArgument(
            "Expected feature_list_dense_missing_assumed_empty ",
            "to be a vector, got shape: ",
            feature_list_dense_missing_assumed_empty->shape().DebugString()));
    // NOTE: "assumped" below is a pre-existing identifier typo kept as-is.
    auto feature_list_dense_missing_assumped_empty_t =
        feature_list_dense_missing_assumed_empty->vec<tstring>();
    for (int de = 0;
         de < feature_list_dense_missing_assumed_empty->NumElements(); ++de) {
      feature_list_dense_missing_assumed_empty_set.insert(
          feature_list_dense_missing_assumped_empty_t(de));
    }
    bool has_debug_name = (debug_name->NumElements() > 0);
    if (has_debug_name) {
      OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(debug_name->shape()),
                  errors::InvalidArgument(
                      "Expected debug_name to be a scalar, got shape: ",
                      debug_name->shape().DebugString()));
    }
    // Only dereferenced later when has_debug_name is true.
    auto debug_name_t = debug_name->scalar<tstring>();
    OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(serialized->shape()),
errors::InvalidArgument( "Expected serialized to be a scalar, got shape: ", serialized->shape().DebugString())); OP_REQUIRES(ctx, context_dense_defaults.size() == attrs_.num_context_dense, errors::InvalidArgument("Expected len(context_dense_defaults) " "== len(context_dense_keys) but got: ", context_dense_defaults.size(), " vs. ", attrs_.num_context_dense)); std::vector<bool> required(attrs_.num_context_dense); for (int d = 0; d < attrs_.num_context_dense; ++d) { const Tensor& def_value = context_dense_defaults[d]; required[d] = (def_value.NumElements() == 0); if (def_value.NumElements() > 0) { OP_REQUIRES(ctx, def_value.shape() == attrs_.context_dense_shapes[d], errors::InvalidArgument( "def_value[", d, "].shape() == ", def_value.shape().DebugString(), " != context_dense_shapes_[", d, "] == ", attrs_.context_dense_shapes[d].DebugString())); OP_REQUIRES( ctx, def_value.dtype() == attrs_.context_dense_types[d], errors::InvalidArgument( "context_dense_defaults[", d, "].dtype() == ", DataTypeString(def_value.dtype()), " != context_dense_types_[", d, "] == ", DataTypeString(attrs_.context_dense_types[d]))); } } auto serialized_t = serialized->scalar<tstring>(); OpOutputList context_sparse_indices; OpOutputList context_sparse_values; OpOutputList context_sparse_shapes; OpOutputList context_dense_values; OpOutputList feature_list_sparse_indices; OpOutputList feature_list_sparse_values; OpOutputList feature_list_sparse_shapes; OpOutputList feature_list_dense_values; OP_REQUIRES_OK(ctx, ctx->output_list("context_sparse_indices", &context_sparse_indices)); OP_REQUIRES_OK( ctx, ctx->output_list("context_sparse_values", &context_sparse_values)); OP_REQUIRES_OK( ctx, ctx->output_list("context_sparse_shapes", &context_sparse_shapes)); OP_REQUIRES_OK( ctx, ctx->output_list("context_dense_values", &context_dense_values)); OP_REQUIRES_OK(ctx, ctx->output_list("context_sparse_indices", &context_sparse_indices)); OP_REQUIRES_OK(ctx, ctx->output_list("feature_list_sparse_indices", 
&feature_list_sparse_indices));
    OP_REQUIRES_OK(ctx, ctx->output_list("feature_list_sparse_values",
                                         &feature_list_sparse_values));
    OP_REQUIRES_OK(ctx, ctx->output_list("feature_list_sparse_shapes",
                                         &feature_list_sparse_shapes));
    OP_REQUIRES_OK(ctx, ctx->output_list("feature_list_dense_values",
                                         &feature_list_dense_values));

    // Parse the proto into an arena sized ~110% of the input to avoid
    // repeated block allocations.
    protobuf::ArenaOptions options;
    const size_t block_size = serialized_t().size() * 1.1;
    options.start_block_size = std::max(options.start_block_size, block_size);
    options.max_block_size = std::max(options.max_block_size, block_size);
    protobuf::Arena arena(options);
    auto& ex = *protobuf::Arena::Create<SequenceExample>(&arena);
    OP_REQUIRES(
        ctx, ParseProtoUnlimited(&ex, serialized_t()),
        errors::InvalidArgument("Could not parse example input, value: '",
                                serialized_t(), "'"));
    const tstring& name = (has_debug_name) ? debug_name_t() : "<unknown>";
    const Features& context = ex.context();
    const auto& context_dict = context.feature();

    // Allocate the context-dense outputs up front.
    for (int d = 0; d < attrs_.num_context_dense; ++d) {
      TensorShape out_shape;
      for (const int dim : attrs_.context_dense_shapes[d].dim_sizes())
        OP_REQUIRES_OK(ctx, out_shape.AddDimWithStatus(dim));
      Tensor* out = nullptr;
      OP_REQUIRES_OK(ctx, context_dense_values.allocate(d, out_shape, &out));
    }

    // Context dense: copy from the proto when present, otherwise fall back
    // to the provided default (required features must be present).
    for (int d = 0; d < attrs_.num_context_dense; ++d) {
      const tstring& key = context_dense_keys_t[d];
      const DataType& dtype = attrs_.context_dense_types[d];
      const TensorShape& shape = attrs_.context_dense_shapes[d];
      const auto& feature_found = context_dict.find(key);
      OP_REQUIRES(
          ctx, (feature_found != context_dict.end()) || !required[d],
          errors::InvalidArgument("Name: ", name, ", Context feature '", key,
                                  "' is required but could not be found."));
      if (feature_found != context_dict.end()) {
        const Feature& f = feature_found->second;
        bool types_match;
        OP_REQUIRES_OK(ctx, CheckTypesMatch(f, dtype, &types_match));
        OP_REQUIRES(
            ctx, types_match,
            errors::InvalidArgument("Name: ", name, ", Context feature: ", key,
                                    ". Data types don't match. ",
                                    "Expected type: ", DataTypeString(dtype),
                                    " Feature is: ", f.DebugString()));
        OP_REQUIRES_OK(ctx, FeatureDenseCopy(0, name, key, dtype, shape, f,
                                             context_dense_values[d]));
      } else {
        RowDenseCopy(0, dtype, context_dense_defaults[d],
                     context_dense_values[d]);
      }
    }

    // Context sparse: a single example yields rank-1 indices [N, 1].
    for (int d = 0; d < attrs_.num_context_sparse; ++d) {
      const tstring& key = context_sparse_keys_t[d];
      const DataType& dtype = attrs_.context_sparse_types[d];
      const auto& feature_found = context_dict.find(key);
      bool feature_has_data =
          (feature_found != context_dict.end() &&
           (feature_found->second.kind_case() != Feature::KIND_NOT_SET));
      if (feature_has_data) {
        const Feature& f = feature_found->second;
        bool types_match;
        OP_REQUIRES_OK(ctx, CheckTypesMatch(f, dtype, &types_match));
        OP_REQUIRES(
            ctx, types_match,
            errors::InvalidArgument("Name: ", name, ", Context feature: ", key,
                                    ". Data types don't match. ",
                                    "Expected type: ", DataTypeString(dtype),
                                    " Feature is: ", f.DebugString()));
        Tensor feature_values = FeatureSparseCopy(0, key, dtype, f);
        const int64_t num_elements = feature_values.NumElements();
        TensorShape indices_shape({num_elements, 1});
        Tensor* sp_indices_d = nullptr;
        Tensor* sp_shape_d = nullptr;
        OP_REQUIRES_OK(ctx, context_sparse_indices.allocate(d, indices_shape,
                                                            &sp_indices_d));
        context_sparse_values.set(d, feature_values);
        OP_REQUIRES_OK(ctx, context_sparse_shapes.allocate(d, TensorShape({1}),
                                                           &sp_shape_d));
        auto shape_t = sp_shape_d->vec<int64_t>();
        shape_t(0) = num_elements;
        // Indices for a single example are simply 0..N-1.
        auto indices_t = sp_indices_d->matrix<int64_t>();
        std::iota(indices_t.data(), indices_t.data() + num_elements, 0);
      } else {
        // Feature absent: emit empty indices/values and shape [0].
        TensorShape indices_shape({0, 1});
        TensorShape values_shape({0});
        Tensor* sp_indices_d = nullptr;
        Tensor* sp_values_d = nullptr;
        Tensor* sp_shape_d = nullptr;
        OP_REQUIRES_OK(ctx, context_sparse_indices.allocate(d, indices_shape,
                                                            &sp_indices_d));
        OP_REQUIRES_OK(
            ctx, context_sparse_values.allocate(d, values_shape, &sp_values_d));
        OP_REQUIRES_OK(ctx, context_sparse_shapes.allocate(d, TensorShape({1}),
                                                           &sp_shape_d));
        auto shape_t = sp_shape_d->vec<int64_t>();
        shape_t(0) = 0;
      }
    }

    const FeatureLists& feature_lists = ex.feature_lists();
    const auto& feature_list_dict = feature_lists.feature_list();
    FeatureList empty_feature_list;

    // Feature-list dense: one row per time step; missing lists are only
    // allowed when listed in feature_list_dense_missing_assumed_empty.
    for (int d = 0; d < attrs_.num_feature_list_dense; ++d) {
      const tstring& key = feature_list_dense_keys_t[d];
      const DataType& dtype = attrs_.feature_list_dense_types[d];
      const TensorShape& shape = attrs_.feature_list_dense_shapes[d];
      const auto& feature_list_found = feature_list_dict.find(key);
      bool feature_list_missing =
          (feature_list_found == feature_list_dict.end());
      bool feature_list_allowed_missing =
          (feature_list_dense_missing_assumed_empty_set.count(key) > 0);
      OP_REQUIRES(
          ctx, !feature_list_missing || feature_list_allowed_missing,
          errors::InvalidArgument("Name: ", name, ", Feature list '", key,
                                  "' is required but could not be found. "
                                  "Did you mean to include it in "
                                  "feature_list_dense_missing_assumed_empty or "
                                  "feature_list_dense_defaults?"));
      TensorShape out_shape;
      const FeatureList& fl = (feature_list_missing)
                                  ? empty_feature_list
                                  : feature_list_found->second;
      // Output shape is [num_steps] + declared per-step shape.
      OP_REQUIRES_OK(ctx, out_shape.AddDimWithStatus(fl.feature_size()));
      for (const int dim : attrs_.feature_list_dense_shapes[d].dim_sizes()) {
        OP_REQUIRES_OK(ctx, out_shape.AddDimWithStatus(dim));
      }
      Tensor* out = nullptr;
      OP_REQUIRES_OK(ctx,
                     feature_list_dense_values.allocate(d, out_shape, &out));
      for (int64_t t = 0; t < fl.feature_size(); ++t) {
        const Feature& f = fl.feature(t);
        bool types_match;
        OP_REQUIRES_OK(ctx, CheckTypesMatch(f, dtype, &types_match));
        OP_REQUIRES(ctx, types_match,
                    errors::InvalidArgument(
                        "Name: ", name, ", Feature list: ", key, ", Index: ", t,
                        ". Data types don't match. ",
                        "Expected type: ", DataTypeString(dtype),
                        " Feature is: ", f.DebugString()));
        OP_REQUIRES_OK(ctx, FeatureDenseCopy(t, name, key, dtype, shape, f,
                                             feature_list_dense_values[d]));
      }
    }

    // Feature-list sparse: ragged per-step values packed into a [steps,
    // max_per_step] sparse tensor.
    for (int d = 0; d < attrs_.num_feature_list_sparse; ++d) {
      const tstring& key = feature_list_sparse_keys_t[d];
      const DataType& dtype = attrs_.feature_list_sparse_types[d];
      const auto& feature_list_found = feature_list_dict.find(key);
      bool feature_list_has_data =
          (feature_list_found != feature_list_dict.end());
      std::vector<Tensor> sparse_values_tmp;
      int64_t feature_list_size = 0;
      if (feature_list_has_data) {
        const FeatureList& fl = feature_list_found->second;
        feature_list_size = fl.feature_size();
        for (int64_t t = 0; t < feature_list_size; ++t) {
          const Feature& f = fl.feature(t);
          bool types_match;
          OP_REQUIRES_OK(ctx, CheckTypesMatch(f, dtype, &types_match));
          // An unset Feature at step t is tolerated (yields no values).
          OP_REQUIRES(
              ctx, f.kind_case() == Feature::KIND_NOT_SET || types_match,
              errors::InvalidArgument(
                  "Name: ", name, ", Feature List: ", key, ", Index: ", t,
                  ". Data types don't match. ",
                  "Expected type: ", DataTypeString(dtype),
                  " Feature is: ", f.DebugString()));
          sparse_values_tmp.push_back(FeatureSparseCopy(t, key, dtype, f));
        }
      } else {
        sparse_values_tmp.push_back(Tensor(dtype, TensorShape({0})));
      }
      int64_t total_num_features = 0;
      int64_t max_num_features = 0;
      for (int t = 0; t < feature_list_size; ++t) {
        const Tensor& v = sparse_values_tmp[t];
        const int64_t num_elements = v.shape().num_elements();
        total_num_features += num_elements;
        max_num_features = std::max(max_num_features, num_elements);
      }
      TensorShape indices_shape({total_num_features, 2});
      TensorShape values_shape({total_num_features});
      Tensor* sp_indices_d = nullptr;
      Tensor* sp_values_d = nullptr;
      Tensor* sp_shape_d = nullptr;
      OP_REQUIRES_OK(ctx, feature_list_sparse_indices.allocate(
                              d, indices_shape, &sp_indices_d));
      OP_REQUIRES_OK(ctx, feature_list_sparse_values.allocate(d, values_shape,
                                                              &sp_values_d));
      OP_REQUIRES_OK(ctx, feature_list_sparse_shapes.allocate(
                              d, TensorShape({2}), &sp_shape_d));
      auto shape_t = sp_shape_d->vec<int64_t>();
      shape_t(0) = feature_list_size;
      shape_t(1) = max_num_features;
      int64_t offset = 0;
      for (int t = 0; t < feature_list_size; ++t) {
        const int64_t num_elements = CopyIntoSparseTensor(
            sparse_values_tmp[t], t, offset, sp_indices_d, sp_values_d);
        offset += num_elements;
      }
    }
  }

 protected:
  ParseSingleSequenceExampleAttrs attrs_;
  // Guards one-time metrics recording in Compute().
  absl::once_flag flag_;
};

REGISTER_KERNEL_BUILDER(Name("ParseSingleSequenceExample").Device(DEVICE_CPU),
                        ParseSingleSequenceExampleOp);

#ifndef IS_MOBILE_PLATFORM
// Converts JSON-encoded tensorflow.Example strings into binary proto
// serializations, elementwise over the input tensor.
class DecodeJSONExampleOp : public OpKernel {
 public:
  explicit DecodeJSONExampleOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
    resolver_.reset(protobuf::util::NewTypeResolverForDescriptorPool(
        "type.googleapis.com", protobuf::DescriptorPool::generated_pool()));
  }

  void Compute(OpKernelContext* ctx) override {
    const Tensor* json_examples;
    OP_REQUIRES_OK(ctx, ctx->input("json_examples", &json_examples));
    Tensor* binary_examples;
    OP_REQUIRES_OK(
        ctx,
ctx->allocate_output("binary_examples", json_examples->shape(),
                     &binary_examples));
    for (int i = 0; i < json_examples->NumElements(); ++i) {
      const tstring& json_example = json_examples->flat<tstring>()(i);
      protobuf::io::ArrayInputStream in(json_example.data(),
                                        json_example.size());
      TStringOutputStream out(&binary_examples->flat<tstring>()(i));
      auto status = protobuf::util::JsonToBinaryStream(
          resolver_.get(), "type.googleapis.com/tensorflow.Example", &in,
          &out);
      OP_REQUIRES(ctx, status.ok(),
                  errors::InvalidArgument("Error while parsing JSON: ",
                                          string(status.message())));
    }
  }

 private:
  std::unique_ptr<protobuf::util::TypeResolver> resolver_;
};

REGISTER_KERNEL_BUILDER(Name("DecodeJSONExample").Device(DEVICE_CPU),
                        DecodeJSONExampleOp);
#endif

}
#include <unordered_map> #include "absl/base/call_once.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/feature.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { typedef std::map<std::tuple<int, int, int>, Tensor> ExampleTensorMap; class BytesFiller { public: BytesFiller() {} void operator()(Feature* f, int feature_size) const { for (int i = 0; i < feature_size; ++i) { f->mutable_bytes_list()->add_value("abcd1234abcd1234abcd1234abcd1234!"); } } Tensor make_dense_default(int feature_size) { return Tensor(dtype, TensorShape({feature_size})); } DataType dtype = DT_STRING; }; class Int64Filler { public: Int64Filler() {} void operator()(Feature* f, int feature_size) const { for (int i = 0; i < feature_size; ++i) { f->mutable_int64_list()->add_value(1729); } } Tensor make_dense_default(int feature_size) { return Tensor(dtype, TensorShape({feature_size})); } DataType dtype = DT_INT64; }; class FloatFiller { public: FloatFiller() {} void operator()(Feature* f, int feature_size) const { for (int i = 0; i < feature_size; ++i) { f->mutable_float_list()->add_value(1.729); } } Tensor make_dense_default(int feature_size) { return Tensor(dtype, TensorShape({feature_size})); } DataType dtype = DT_FLOAT; }; template <typename T> struct ExampleStore { private: static ExampleTensorMap 
serialized_example; static absl::once_flag flags_init; public: static ExampleTensorMap& GetSerializedExample() { absl::call_once(flags_init, [] { AddExample(&serialized_example, 10, 1, 1); AddExample(&serialized_example, 100, 1, 1); AddExample(&serialized_example, 1000, 1, 1); AddExample(&serialized_example, 10, 128, 1); AddExample(&serialized_example, 100, 128, 1); AddExample(&serialized_example, 1000, 128, 1); AddExample(&serialized_example, 10, 512, 1); AddExample(&serialized_example, 100, 512, 1); AddExample(&serialized_example, 1000, 512, 1); AddExample(&serialized_example, 1, 1, 10); AddExample(&serialized_example, 1, 1, 100); AddExample(&serialized_example, 1, 1, 1000); AddExample(&serialized_example, 1, 1, 10000); AddExample(&serialized_example, 1, 1, 100000); AddExample(&serialized_example, 1, 1, 1000000); AddExample(&serialized_example, 10, 1, 100000); AddExample(&serialized_example, 100, 1, 10000); AddExample(&serialized_example, 1000, 1, 1000); }); return serialized_example; } typedef T Filler; static void AddExample(ExampleTensorMap* examples, int num_keys, int batch_size, int feature_size) { Example example; Filler fill; Tensor record_string(DT_STRING, TensorShape({batch_size})); auto string_t = record_string.vec<tstring>(); example.Clear(); for (int b = 0; b < batch_size; ++b) { for (int k = 0; k < num_keys; ++k) { string k_str = strings::Printf("feature_%d", k); Feature f; fill(&f, feature_size); Features* features = example.mutable_features(); (*features->mutable_feature())[k_str] = f; } CHECK(SerializeToTString(example, &string_t(b))); } (*examples)[std::make_tuple(batch_size, num_keys, feature_size)] = record_string; } }; template <typename T> ExampleTensorMap ExampleStore<T>::serialized_example; template <typename T> absl::once_flag ExampleStore<T>::flags_init; template struct ExampleStore<BytesFiller>; template struct ExampleStore<Int64Filler>; template struct ExampleStore<FloatFiller>; enum BenchmarkType { kDense, kSparse, kVarLenDense, 
kRagged }; template <typename S, BenchmarkType b_type> struct BenchmarkOptions { int benchmark_type = b_type; typedef S Store; typename S::Filler filler; }; template <typename Options> static Graph* ParseExample(int batch_size, int num_keys, int feature_size) { Graph* g = new Graph(OpRegistry::Global()); Tensor& serialized = Options::Store::GetSerializedExample()[std::make_tuple( batch_size, num_keys, feature_size)]; Tensor names(DT_STRING, TensorShape({batch_size})); std::vector<NodeBuilder::NodeOut> sparse_keys; std::vector<NodeBuilder::NodeOut> dense_keys; std::vector<NodeBuilder::NodeOut> dense_defaults; std::vector<DataType> sparse_types; std::vector<PartialTensorShape> dense_shapes; Options opt; for (int i = 0; i < num_keys; ++i) { Tensor key(DT_STRING, TensorShape()); key.scalar<tstring>()() = strings::Printf("feature_%d", i); switch (opt.benchmark_type) { case kDense: dense_keys.emplace_back(test::graph::Constant(g, key)); dense_defaults.emplace_back(test::graph::Constant( g, opt.filler.make_dense_default(feature_size))); dense_shapes.push_back(PartialTensorShape({feature_size})); break; case kVarLenDense: dense_keys.emplace_back(test::graph::Constant(g, key)); dense_defaults.emplace_back( test::graph::Constant(g, opt.filler.make_dense_default(1))); dense_shapes.push_back(PartialTensorShape({-1})); break; case kSparse: sparse_keys.emplace_back(test::graph::Constant(g, key)); sparse_types.push_back(opt.filler.dtype); break; } } Node* ret; TF_EXPECT_OK(NodeBuilder(g->NewName("n"), "ParseExample") .Input(test::graph::Constant(g, serialized)) .Input(test::graph::Constant(g, names)) .Input(sparse_keys) .Input(dense_keys) .Input(dense_defaults) .Attr("sparse_types", sparse_types) .Attr("dense_shapes", dense_shapes) .Finalize(g, &ret)); FixupSourceAndSinkEdges(g); return g; } template <typename Options> static Graph* ParseExampleV2(int batch_size, int num_keys, int feature_size) { bool scalar_input = (batch_size == 0); Graph* g = new Graph(OpRegistry::Global()); 
Tensor& serialized_batch = Options::Store::GetSerializedExample()[std::make_tuple( scalar_input ? 1 : batch_size, num_keys, feature_size)]; Tensor serialized_example(DT_STRING, TensorShape()); Tensor names(DT_STRING, scalar_input ? TensorShape({}) : TensorShape({batch_size})); Tensor* serialized; if (scalar_input) { serialized_example.scalar<tstring>()() = serialized_batch.vec<tstring>()(0); serialized = &serialized_example; } else { serialized = &serialized_batch; } std::vector<NodeBuilder::NodeOut> dense_defaults; std::vector<DataType> sparse_types; std::vector<DataType> ragged_value_types; std::vector<DataType> ragged_split_types; std::vector<PartialTensorShape> dense_shapes; Tensor keys_t(DT_STRING, {static_cast<int32>(num_keys)}); auto keys_flat = keys_t.flat<tstring>(); Options opt; for (int i = 0; i < num_keys; ++i) { keys_flat(i) = strings::Printf("feature_%d", i); switch (opt.benchmark_type) { case kDense: dense_defaults.emplace_back(test::graph::Constant( g, opt.filler.make_dense_default(feature_size))); dense_shapes.push_back(PartialTensorShape({feature_size})); break; case kVarLenDense: dense_defaults.emplace_back( test::graph::Constant(g, opt.filler.make_dense_default(1))); dense_shapes.push_back(PartialTensorShape({-1})); break; case kSparse: sparse_types.push_back(opt.filler.dtype); break; case kRagged: ragged_value_types.push_back(opt.filler.dtype); ragged_split_types.push_back(DT_INT32); break; } } Tensor empty_keys(DT_STRING, {0}); auto bm_type = opt.benchmark_type; auto& sparse_keys = (bm_type == kSparse) ? keys_t : empty_keys; auto& dense_keys = (bm_type == kDense || bm_type == kVarLenDense) ? keys_t : empty_keys; auto& ragged_keys = (bm_type == kRagged) ? keys_t : empty_keys; int num_sparse = opt.benchmark_type == kSparse ? 
num_keys : 0; Node* ret; TF_EXPECT_OK(NodeBuilder(g->NewName("n"), "ParseExampleV2") .Input(test::graph::Constant(g, *serialized)) .Input(test::graph::Constant(g, names)) .Input(test::graph::Constant(g, sparse_keys)) .Input(test::graph::Constant(g, dense_keys)) .Input(test::graph::Constant(g, ragged_keys)) .Input(dense_defaults) .Attr("num_sparse", num_sparse) .Attr("sparse_types", sparse_types) .Attr("ragged_value_types", ragged_value_types) .Attr("ragged_split_types", ragged_split_types) .Attr("dense_shapes", dense_shapes) .Finalize(g, &ret)); FixupSourceAndSinkEdges(g); return g; } template <typename Options> static Graph* ParseSingleExample(int num_keys, int feature_size) { Graph* g = new Graph(OpRegistry::Global()); Tensor& serialized_batch_1 = Options::Store::GetSerializedExample()[std::make_tuple(1, num_keys, feature_size)]; Tensor serialized(DT_STRING, TensorShape()); serialized.scalar<tstring>()() = serialized_batch_1.vec<tstring>()(0); std::vector<string> sparse_keys; std::vector<string> dense_keys; std::vector<NodeBuilder::NodeOut> dense_defaults; std::vector<DataType> sparse_types; std::vector<PartialTensorShape> dense_shapes; Options opt; for (int i = 0; i < num_keys; ++i) { string key = strings::Printf("feature_%d", i); switch (opt.benchmark_type) { case kDense: dense_keys.push_back(key), dense_defaults.emplace_back(test::graph::Constant( g, opt.filler.make_dense_default(feature_size))); dense_shapes.push_back(PartialTensorShape({feature_size})); break; case kVarLenDense: dense_keys.push_back(key), dense_defaults.emplace_back( test::graph::Constant(g, opt.filler.make_dense_default(1))); dense_shapes.push_back(PartialTensorShape({-1})); break; case kSparse: sparse_keys.push_back(key), sparse_types.push_back(opt.filler.dtype); break; } } Node* ret; TF_EXPECT_OK(NodeBuilder(g->NewName("n"), "ParseSingleExample") .Input(test::graph::Constant(g, serialized)) .Input(dense_defaults) .Attr<int64_t>("num_sparse", sparse_keys.size()) .Attr("sparse_keys", 
sparse_keys) .Attr("sparse_types", sparse_types) .Attr("dense_keys", dense_keys) .Attr("dense_shapes", dense_shapes) .Finalize(g, &ret)); FixupSourceAndSinkEdges(g); return g; } typedef BenchmarkOptions<ExampleStore<BytesFiller>, kSparse> SparseString; typedef BenchmarkOptions<ExampleStore<BytesFiller>, kDense> DenseString; typedef BenchmarkOptions<ExampleStore<BytesFiller>, kVarLenDense> VarLenDenseString; typedef BenchmarkOptions<ExampleStore<BytesFiller>, kRagged> RaggedString; typedef BenchmarkOptions<ExampleStore<Int64Filler>, kSparse> SparseInt64; typedef BenchmarkOptions<ExampleStore<Int64Filler>, kDense> DenseInt64; typedef BenchmarkOptions<ExampleStore<Int64Filler>, kVarLenDense> VarLenDenseInt64; typedef BenchmarkOptions<ExampleStore<Int64Filler>, kRagged> RaggedInt64; typedef BenchmarkOptions<ExampleStore<FloatFiller>, kSparse> SparseFloat; typedef BenchmarkOptions<ExampleStore<FloatFiller>, kDense> DenseFloat; typedef BenchmarkOptions<ExampleStore<FloatFiller>, kVarLenDense> VarLenDenseFloat; typedef BenchmarkOptions<ExampleStore<FloatFiller>, kRagged> RaggedFloat; #define BM_ParseExample(TYPE, B, K, F) \ static void BM_ParseExample##_##TYPE##_##B##_##K##_##F( \ ::testing::benchmark::State& state) { \ int64_t items_per_iter = static_cast<int64_t>(B) * K * F; \ test::Benchmark("cpu", ParseExample<TYPE>(B, K, F), nullptr, nullptr, \ nullptr, "SINGLE_THREADED_EXECUTOR", false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ items_per_iter); \ } \ BENCHMARK(BM_ParseExample##_##TYPE##_##B##_##K##_##F)->UseRealTime(); #define BM_AllParseExample(Type) \ BM_ParseExample(Type, 1, 10, 1); \ BM_ParseExample(Type, 128, 10, 1); \ BM_ParseExample(Type, 512, 10, 1); \ BM_ParseExample(Type, 1, 100, 1); \ BM_ParseExample(Type, 128, 100, 1); \ BM_ParseExample(Type, 512, 100, 1); \ BM_ParseExample(Type, 1, 1000, 1); \ BM_ParseExample(Type, 128, 1000, 1); \ BM_ParseExample(Type, 512, 1000, 1); \ BM_ParseExample(Type, 1, 1, 1000000); 
BM_AllParseExample(SparseString); BM_AllParseExample(DenseString); BM_AllParseExample(VarLenDenseString); BM_AllParseExample(SparseInt64); BM_AllParseExample(DenseInt64); BM_AllParseExample(VarLenDenseInt64); BM_AllParseExample(SparseFloat); BM_AllParseExample(DenseFloat); BM_AllParseExample(VarLenDenseFloat); #define BM_ParseExampleV2(TYPE, B, K, F) \ static void BM_ParseExampleV2##_##TYPE##_##B##_##K##_##F( \ ::testing::benchmark::State& state) { \ int64_t items_per_iter = static_cast<int64_t>(std::max(B, 1)) * K * F; \ test::Benchmark("cpu", ParseExampleV2<TYPE>(B, K, F), nullptr, nullptr, \ nullptr, "SINGLE_THREADED_EXECUTOR", \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ items_per_iter); \ } \ BENCHMARK(BM_ParseExampleV2##_##TYPE##_##B##_##K##_##F)->UseRealTime(); #define BM_AllParseExampleV2(Type) \ \ BM_ParseExampleV2(Type, 1, 10, 1); \ BM_ParseExampleV2(Type, 128, 10, 1); \ BM_ParseExampleV2(Type, 512, 10, 1); \ BM_ParseExampleV2(Type, 1, 100, 1); \ BM_ParseExampleV2(Type, 128, 100, 1); \ BM_ParseExampleV2(Type, 512, 100, 1); \ BM_ParseExampleV2(Type, 1, 1000, 1); \ BM_ParseExampleV2(Type, 128, 1000, 1); \ BM_ParseExampleV2(Type, 512, 1000, 1); \ BM_ParseExampleV2(Type, 1, 1, 1000000); \ \ BM_ParseExampleV2(Type, 0, 10, 1); \ BM_ParseExampleV2(Type, 0, 100, 1); \ BM_ParseExampleV2(Type, 0, 1000, 1); \ BM_ParseExampleV2(Type, 0, 1, 10); \ BM_ParseExampleV2(Type, 0, 1, 100); \ BM_ParseExampleV2(Type, 0, 1, 1000); \ BM_ParseExampleV2(Type, 0, 1, 10000); \ BM_ParseExampleV2(Type, 0, 1, 100000); \ BM_ParseExampleV2(Type, 0, 1, 1000000); \ BM_ParseExampleV2(Type, 0, 10, 100000); \ BM_ParseExampleV2(Type, 0, 100, 10000); \ BM_ParseExampleV2(Type, 0, 1000, 1000); BM_AllParseExampleV2(SparseString); BM_AllParseExampleV2(DenseString); BM_AllParseExampleV2(VarLenDenseString); BM_AllParseExampleV2(RaggedString); BM_AllParseExampleV2(SparseInt64); BM_AllParseExampleV2(DenseInt64); BM_AllParseExampleV2(VarLenDenseInt64); 
BM_AllParseExampleV2(RaggedInt64); BM_AllParseExampleV2(SparseFloat); BM_AllParseExampleV2(DenseFloat); BM_AllParseExampleV2(VarLenDenseFloat); BM_AllParseExampleV2(RaggedFloat); #define BM_ParseSingleExample(TYPE, K, F) \ void BM_ParseSingleExample##_##TYPE##_1_##K##_##F( \ ::testing::benchmark::State& state) { \ int64_t items_per_iter = K * F; \ test::Benchmark("cpu", ParseSingleExample<TYPE>(K, F), nullptr, nullptr, \ nullptr, "SINGLE_THREADED_EXECUTOR", \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ items_per_iter); \ } \ BENCHMARK(BM_ParseSingleExample##_##TYPE##_1_##K##_##F)->UseRealTime(); #define BM_AllParseSingleExample(Type) \ BM_ParseSingleExample(Type, 10, 1); \ BM_ParseSingleExample(Type, 100, 1); \ BM_ParseSingleExample(Type, 1000, 1); \ BM_ParseSingleExample(Type, 1, 10); \ BM_ParseSingleExample(Type, 1, 100); \ BM_ParseSingleExample(Type, 1, 1000); \ BM_ParseSingleExample(Type, 1, 10000); \ BM_ParseSingleExample(Type, 1, 100000); \ BM_ParseSingleExample(Type, 1, 1000000); \ BM_ParseSingleExample(Type, 10, 100000); \ BM_ParseSingleExample(Type, 100, 10000); \ BM_ParseSingleExample(Type, 1000, 1000); BM_AllParseSingleExample(SparseString); BM_AllParseSingleExample(DenseString); BM_AllParseSingleExample(VarLenDenseString); BM_AllParseSingleExample(SparseInt64); BM_AllParseSingleExample(DenseInt64); BM_AllParseSingleExample(VarLenDenseInt64); BM_AllParseSingleExample(SparseFloat); BM_AllParseSingleExample(DenseFloat); BM_AllParseSingleExample(VarLenDenseFloat); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/example_parsing_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/example_parsing_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
85410ddd-a164-4604-bf44-65fbae760e54
cpp
tensorflow/tensorflow
ragged_tensor_to_sparse_kernel
tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc
tensorflow/core/kernels/ragged_tensor_to_sparse_kernel_test.cc
#include <limits> #include <memory> #include <string> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { using errors::InvalidArgument; template <typename SPLITS_TYPE> class RaggedTensorToSparseOp : public OpKernel { public: using OpKernel::OpKernel; using ConstFlatSplits = typename TTypes<SPLITS_TYPE>::ConstFlat; void Compute(OpKernelContext* context) override { OpInputList rt_nested_splits_in; OP_REQUIRES_OK( context, context->input_list("rt_nested_splits", &rt_nested_splits_in)); const int rt_nested_splits_len = rt_nested_splits_in.size(); OP_REQUIRES(context, rt_nested_splits_len > 0, errors::InvalidArgument("rt_nested_splits must be non empty")); std::vector<ConstFlatSplits> rt_nested_splits; rt_nested_splits.reserve(rt_nested_splits_len); for (int i = 0; i < rt_nested_splits_len; ++i) { rt_nested_splits.push_back(rt_nested_splits_in[i].flat<SPLITS_TYPE>()); } const Tensor& rt_dense_values_in = context->input(rt_nested_splits_len); OP_REQUIRES_OK(context, ValidateInputs(rt_nested_splits, rt_dense_values_in)); std::vector<int64_t> index_prefix(rt_nested_splits_len); std::vector<std::vector<int64_t>> index_suffixes = MakeIndexSuffixes(rt_dense_values_in.shape()); const int64_t nvals = (rt_nested_splits.back()(rt_nested_splits.back().size() - 1) * index_suffixes.size()); const int64_t indices_len = rt_nested_splits_len + rt_dense_values_in.dims(); Tensor* sparse_indices_out = nullptr; OP_REQUIRES_OK( context, context->allocate_output(0, TensorShape({nvals, indices_len}), &sparse_indices_out)); auto sparse_indices = sparse_indices_out->tensor<int64_t, 2>(); std::vector<int64_t> pos(rt_nested_splits_len); int64_t& final_pos = pos[rt_nested_splits_len - 1]; int next_index = 0; int max_final_pos = rt_nested_splits.back().size() - 1; 
for (; final_pos < max_final_pos; ++final_pos) { for (int dim = rt_nested_splits_len - 2; dim >= 0; --dim) { while (IsCompleted(pos, dim, rt_nested_splits)) { pos[dim] += 1; } } for (int dim = 0; dim < index_prefix.size(); ++dim) { int start = dim > 0 ? rt_nested_splits[dim - 1](pos[dim - 1]) : 0; index_prefix[dim] = pos[dim] - start; } const auto& final_splits = rt_nested_splits[rt_nested_splits_len - 1]; int64_t slice_len = final_splits(final_pos + 1) - final_splits(final_pos); for (int64_t i = 0; i < slice_len; ++i) { for (const auto& index_suffix : index_suffixes) { int dim = 0; for (int64_t index : index_prefix) { sparse_indices(next_index, dim++) = index; } sparse_indices(next_index, dim++) = i; for (int64_t index : index_suffix) { sparse_indices(next_index, dim++) = index; } DCHECK_EQ(dim, indices_len); ++next_index; } } } DCHECK_EQ(next_index, nvals); if (rt_dense_values_in.dims() == 1) { context->set_output(1, rt_dense_values_in); } else { Tensor sparse_values_out(rt_dense_values_in.dtype()); bool shapes_match = sparse_values_out.CopyFrom( rt_dense_values_in, {rt_dense_values_in.NumElements()}); DCHECK(shapes_match); context->set_output(1, sparse_values_out); } int64_t ndims = rt_nested_splits_len + rt_dense_values_in.dims(); Tensor* sparse_dense_shape_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, TensorShape({ndims}), &sparse_dense_shape_out)); auto sparse_dense_shape = sparse_dense_shape_out->vec<int64_t>(); sparse_dense_shape(0) = rt_nested_splits_in[0].dim_size(0) - 1; for (int dim = 0; dim < rt_nested_splits_len; ++dim) { const auto& splits = rt_nested_splits[dim]; SPLITS_TYPE max_width = 0; for (int i = 1; i < splits.size(); ++i) { max_width = std::max(max_width, splits(i) - splits(i - 1)); } sparse_dense_shape(dim + 1) = max_width; } for (int dim = 1; dim < rt_dense_values_in.dims(); ++dim) { sparse_dense_shape(dim + rt_nested_splits_len) = rt_dense_values_in.dim_size(dim); } } private: static ::tensorflow::Status 
ValidateInputs( std::vector<ConstFlatSplits> rt_nested_splits, const Tensor& rt_dense_values_in) { for (int i = 0; i < rt_nested_splits.size(); ++i) { if (rt_nested_splits[i].size() == 0) { return InvalidArgument("ragged splits may not be empty."); } if (rt_nested_splits[i](0) != 0) { return InvalidArgument("First value of ragged splits must be 0."); } for (int j = 1; j < rt_nested_splits[i].size(); ++j) { if (rt_nested_splits[i](j) < rt_nested_splits[i](j - 1)) { return InvalidArgument( "Ragged splits should be non decreasing, but we got ", rt_nested_splits[i](j - 1), " followed by ", rt_nested_splits[i](j)); } } if (i > 0) { SPLITS_TYPE last_split = rt_nested_splits[i - 1](rt_nested_splits[i - 1].size() - 1); if (rt_nested_splits[i].size() != last_split + 1) { return InvalidArgument( "Final value of ragged splits must match the length " "the corresponding ragged values."); } } } if (rt_dense_values_in.dim_size(0) != rt_nested_splits.back()(rt_nested_splits.back().size() - 1)) { return InvalidArgument( "Final value of ragged splits must match the length " "the corresponding ragged values."); } return absl::OkStatus(); } static std::vector<std::vector<int64_t>> MakeIndexSuffixes( const TensorShape& values_shape) { std::vector<std::vector<int64_t>> suffixes{{}}; for (int dim = 1; dim < values_shape.dims(); ++dim) { std::vector<std::vector<int64_t>> new_suffixes; for (const auto& suffix : suffixes) { for (int i = 0; i < values_shape.dim_size(dim); ++i) { new_suffixes.push_back(suffix); new_suffixes.back().push_back(i); } } suffixes.swap(new_suffixes); } return suffixes; } static bool IsCompleted( const std::vector<int64_t>& pos, int dim, const std::vector<ConstFlatSplits>& rt_nested_splits) { int64_t current_child = pos[dim + 1]; int64_t limit_child = rt_nested_splits[dim](pos[dim] + 1); return current_child >= limit_child; } }; REGISTER_KERNEL_BUILDER(Name("RaggedTensorToSparse") .Device(DEVICE_CPU) .TypeConstraint<int32>("Tsplits"), RaggedTensorToSparseOp<int32>); 
REGISTER_KERNEL_BUILDER(Name("RaggedTensorToSparse") .Device(DEVICE_CPU) .TypeConstraint<int64_t>("Tsplits"), RaggedTensorToSparseOp<int64_t>); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class RaggedTensorToSparseTest : public ::tensorflow::OpsTestBase { protected: static constexpr int kSparseIndicesOutput = 0; static constexpr int kSparseValuesOutput = 1; static constexpr int kSparseDenseShapeOutput = 2; template <typename T> void BuildRaggedTensorToSparseGraph( const std::vector<std::vector<int64_t>>& rt_nested_splits, const TensorShape& rt_dense_values_shape, const std::vector<T>& rt_dense_values) { const auto& dtype = DataTypeToEnum<T>::v(); int64_t num_splits = rt_nested_splits.size(); TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorToSparse") .Input(FakeInput(num_splits)) .Input(FakeInput(dtype)) .Attr("RAGGED_RANK", num_splits) .Attr("T", dtype) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); for (const auto& splits : rt_nested_splits) { int64_t splits_size = splits.size(); AddInputFromArray<int64_t>(TensorShape({splits_size}), splits); } AddInputFromArray<T>(rt_dense_values_shape, rt_dense_values); } }; TEST_F(RaggedTensorToSparseTest, OneSplits_Values1D) { BuildRaggedTensorToSparseGraph<int>({{0, 3, 3, 5, 6}}, TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>( *GetOutput(kSparseIndicesOutput), test::AsTensor<int64_t>({0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 3, 0}, {6, 2})); test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput), test::AsTensor<int>({1, 2, 3, 4, 5, 6})); 
test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput), test::AsTensor<int64_t>({4, 3})); } TEST_F(RaggedTensorToSparseTest, EmptyRows) { BuildRaggedTensorToSparseGraph<int>({{0, 0, 4, 4, 6, 6}}, TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>( *GetOutput(kSparseIndicesOutput), test::AsTensor<int64_t>({1, 0, 1, 1, 1, 2, 1, 3, 3, 0, 3, 1}, {6, 2})); test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput), test::AsTensor<int>({1, 2, 3, 4, 5, 6})); test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput), test::AsTensor<int64_t>({5, 4})); } TEST_F(RaggedTensorToSparseTest, OneSplits_Values2D) { BuildRaggedTensorToSparseGraph<int>( {{0, 3, 3, 5, 6}}, TensorShape({6, 2}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); TF_ASSERT_OK(RunOpKernel()); std::vector<int64_t> expected_splits_12_3 = { 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 1, 2, 0, 0, 2, 0, 1, 2, 1, 0, 2, 1, 1, 3, 0, 0, 3, 0, 1}; std::vector<int> expected_values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; test::ExpectTensorEqual<int64_t>( *GetOutput(kSparseIndicesOutput), test::AsTensor<int64_t>(expected_splits_12_3, {12, 3})); test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput), test::AsTensor<int>(expected_values)); test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput), test::AsTensor<int64_t>({4, 3, 2})); } TEST_F(RaggedTensorToSparseTest, TwoSplits_Values1D) { BuildRaggedTensorToSparseGraph<int>( {{0, 3, 3, 5, 7}, {0, 1, 3, 3, 8, 11, 11, 15}}, TensorShape({15}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); TF_ASSERT_OK(RunOpKernel()); std::vector<int64_t> expected_splits_15_3 = { 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 2, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 4, 2, 1, 0, 2, 1, 1, 2, 1, 2, 3, 1, 0, 3, 1, 1, 3, 1, 2, 3, 1, 3}; std::vector<int> expected_values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput), 
test::AsTensor<int>(expected_values)); test::ExpectTensorEqual<int64_t>( *GetOutput(kSparseIndicesOutput), test::AsTensor<int64_t>(expected_splits_15_3, {15, 3})); test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput), test::AsTensor<int64_t>({4, 3, 5})); } TEST_F(RaggedTensorToSparseTest, ShapeFn) { ShapeInferenceTestOp op("RaggedTensorToSparse"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0); INFER_ERROR("Requires RAGGED_RANK>0", op, "?"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1); INFER_OK(op, "?;?", "[?,?];[?];[?]"); INFER_OK(op, "?;[?]", "[?,2];[?];[2]"); INFER_OK(op, "?;[?,?]", "[?,3];[?];[3]"); INFER_OK(op, "[?];[5]", "[5,2];[5];[2]"); INFER_OK(op, "[?];[5,2]", "[10,3];[10];[3]"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2); INFER_OK(op, "?;?;?", "[?,?];[?];[?]"); INFER_OK(op, "?;?;[?]", "[?,3];[?];[3]"); INFER_OK(op, "?;?;[?,?]", "[?,4];[?];[4]"); INFER_OK(op, "[?];[?];[5]", "[5,3];[5];[3]"); INFER_OK(op, "[?];[?];[5,2]", "[10,4];[10];[4]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3); INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]"); INFER_OK(op, "?;?;?;[?]", "[?,4];[?];[4]"); INFER_OK(op, "?;?;?;[5]", "[5,4];[5];[4]"); } TEST_F(RaggedTensorToSparseTest, NoSplits) { const auto& dtype = DataTypeToEnum<int>::v(); TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorToSparse") .Input(FakeInput(0)) .Input(FakeInput(dtype)) .Attr("RAGGED_RANK", 0) .Attr("T", dtype) .Finalize(node_def())); EXPECT_TRUE(absl::StartsWith( InitOp().message(), "Value for attr 'RAGGED_RANK' of 0 must be at least minimum 1")); } TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitStart) { BuildRaggedTensorToSparseGraph<int>({{5, 7, 10}}, TensorShape({0}), {}); 
EXPECT_EQ("First value of ragged splits must be 0.", RunOpKernel().message()); } TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitLengths1) { BuildRaggedTensorToSparseGraph<int>({{0, 5}, {0, 2, 4, 6}}, TensorShape({0}), {}); EXPECT_EQ( "Final value of ragged splits must match the length " "the corresponding ragged values.", RunOpKernel().message()); } TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitLengths2) { BuildRaggedTensorToSparseGraph<int>({{0, 5}}, TensorShape({0}), {}); EXPECT_EQ( "Final value of ragged splits must match the length " "the corresponding ragged values.", RunOpKernel().message()); } TEST_F(RaggedTensorToSparseTest, InvalidArg_EmptySplits) { BuildRaggedTensorToSparseGraph<int>({{}}, TensorShape({0}), {}); EXPECT_EQ("ragged splits may not be empty.", RunOpKernel().message()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
95e96d4b-b827-4843-9abd-3d2159c0edd0
cpp
tensorflow/tensorflow
shape_ops
tensorflow/core/kernels/shape_ops.cc
tensorflow/core/kernels/shape_ops_test.cc
#include "tensorflow/core/kernels/shape_ops.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/register_types.h" namespace tensorflow { REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_CPU) .HostMemory("output") .TypeConstraint<int32>("out_type"), ShapeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_CPU) .HostMemory("output") .TypeConstraint<int64_t>("out_type"), ShapeOp<int64_t>); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Shape") \ .Device(DEVICE_GPU) \ .HostMemory("output") \ .TypeConstraint<int32>("out_type") \ .TypeConstraint<type>("T"), \ ShapeOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("Shape") \ .Device(DEVICE_GPU) \ .HostMemory("output") \ .TypeConstraint<int64_t>("out_type") \ .TypeConstraint<type>("T"), \ ShapeOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_bool(REGISTER_GPU_KERNEL); TF_CALL_variant(REGISTER_GPU_KERNEL); TF_CALL_tstring(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_GPU) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type"), ShapeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_GPU) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type"), ShapeOp<int64_t>); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Shape") \ .Device(DEVICE_DEFAULT) \ .HostMemory("output") \ .TypeConstraint<int32>("out_type") \ .TypeConstraint<type>("T"), \ ShapeOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("Shape") \ .Device(DEVICE_DEFAULT) \ .HostMemory("output") \ .TypeConstraint<int64_t>("out_type") \ .TypeConstraint<type>("T"), \ ShapeOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); TF_CALL_variant(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL 
REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_DEFAULT) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type"), ShapeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Shape") .Device(DEVICE_DEFAULT) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type"), ShapeOp<int64_t>); REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_CPU) .HostMemory("output") .TypeConstraint<int32>("out_type"), ShapeNOp<int32>); REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_CPU) .HostMemory("output") .TypeConstraint<int64_t>("out_type"), ShapeNOp<int64_t>); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("ShapeN") \ .Device(DEVICE_GPU) \ .HostMemory("output") \ .TypeConstraint<int32>("out_type") \ .TypeConstraint<type>("T"), \ ShapeNOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("ShapeN") \ .Device(DEVICE_GPU) \ .HostMemory("output") \ .TypeConstraint<int64_t>("out_type") \ .TypeConstraint<type>("T"), \ ShapeNOp<int64_t>) TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_bool(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_GPU) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type"), ShapeNOp<int32>); REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_GPU) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type"), ShapeNOp<int64_t>); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("ShapeN") \ .Device(DEVICE_DEFAULT) \ .HostMemory("output") \ .TypeConstraint<int32>("out_type") \ .TypeConstraint<type>("T"), \ ShapeNOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("ShapeN") \ .Device(DEVICE_DEFAULT) \ .HostMemory("output") \ .TypeConstraint<int64_t>("out_type") \ .TypeConstraint<type>("T"), \ ShapeNOp<int64_t>) 
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_DEFAULT) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type"), ShapeNOp<int32>); REGISTER_KERNEL_BUILDER(Name("ShapeN") .Device(DEVICE_DEFAULT) .HostMemory("input") .HostMemory("output") .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type"), ShapeNOp<int64_t>); REGISTER_KERNEL_BUILDER(Name("Rank").Device(DEVICE_CPU).HostMemory("output"), RankOp); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Rank") \ .Device(DEVICE_GPU) \ .TypeConstraint<type>("T") \ .HostMemory("output"), \ RankOp); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_variant(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("Rank") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .HostMemory("input") .HostMemory("output"), RankOp); REGISTER_KERNEL_BUILDER(Name("Rank") .Device(DEVICE_GPU) .TypeConstraint<bool>("T") .HostMemory("input") .HostMemory("output"), RankOp); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Rank") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<type>("T") \ .HostMemory("output"), \ RankOp); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_variant(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL REGISTER_KERNEL_BUILDER(Name("Rank") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .HostMemory("input") .HostMemory("output"), RankOp); REGISTER_KERNEL_BUILDER(Name("Rank") .Device(DEVICE_DEFAULT) .TypeConstraint<bool>("T") .HostMemory("input") .HostMemory("output"), RankOp); REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_CPU) .HostMemory("output") .TypeConstraint<int32>("out_type"), SizeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_CPU) .HostMemory("output") 
.TypeConstraint<int64_t>("out_type"), SizeOp<int64_t>); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Size") \ .Device(DEVICE_GPU) \ .TypeConstraint<type>("T") \ .TypeConstraint<int32>("out_type") \ .HostMemory("output"), \ SizeOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("Size") \ .Device(DEVICE_GPU) \ .TypeConstraint<type>("T") \ .TypeConstraint<int64_t>("out_type") \ .HostMemory("output"), \ SizeOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_bool(REGISTER_GPU_KERNEL); TF_CALL_variant(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type") .HostMemory("input") .HostMemory("output"), SizeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type") .HostMemory("input") .HostMemory("output"), SizeOp<int64_t>); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("Size") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<type>("T") \ .TypeConstraint<int32>("out_type") \ .HostMemory("output"), \ SizeOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("Size") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<type>("T") \ .TypeConstraint<int64_t>("out_type") \ .HostMemory("output"), \ SizeOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); TF_CALL_variant(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .TypeConstraint<int32>("out_type") .HostMemory("input") .HostMemory("output"), SizeOp<int32>); REGISTER_KERNEL_BUILDER(Name("Size") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("out_type") .HostMemory("input") .HostMemory("output"), SizeOp<int64_t>); REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_CPU) 
.HostMemory("dim") .TypeConstraint<int32>("Tdim"), ExpandDimsOp<int32>); REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_CPU) .HostMemory("dim") .TypeConstraint<int64_t>("Tdim"), ExpandDimsOp<int64_t>); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("ExpandDims") \ .Device(DEVICE_GPU) \ .TypeConstraint<type>("T") \ .TypeConstraint<int32>("Tdim") \ .HostMemory("dim"), \ ExpandDimsOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("ExpandDims") \ .Device(DEVICE_GPU) \ .TypeConstraint<type>("T") \ .TypeConstraint<int64_t>("Tdim") \ .HostMemory("dim"), \ ExpandDimsOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_bool(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .TypeConstraint<int32>("Tdim") .HostMemory("input") .HostMemory("dim") .HostMemory("output"), ExpandDimsOp<int32>); REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .TypeConstraint<int64_t>("Tdim") .HostMemory("input") .HostMemory("dim") .HostMemory("output"), ExpandDimsOp<int64_t>); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("ExpandDims") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<type>("T") \ .TypeConstraint<int32>("Tdim") \ .HostMemory("dim"), \ ExpandDimsOp<int32>); \ REGISTER_KERNEL_BUILDER(Name("ExpandDims") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<type>("T") \ .TypeConstraint<int64_t>("Tdim") \ .HostMemory("dim"), \ ExpandDimsOp<int64_t>); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .TypeConstraint<int32>("Tdim") .HostMemory("input") .HostMemory("dim") .HostMemory("output"), ExpandDimsOp<int32>); REGISTER_KERNEL_BUILDER(Name("ExpandDims") .Device(DEVICE_DEFAULT) 
.TypeConstraint<int32>("T") .TypeConstraint<int64_t>("Tdim") .HostMemory("input") .HostMemory("dim") .HostMemory("output"), ExpandDimsOp<int64_t>); REGISTER_KERNEL_BUILDER(Name("Squeeze").Device(DEVICE_CPU), SqueezeOp); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("Squeeze").Device(DEVICE_GPU).TypeConstraint<type>("T"), \ SqueezeOp); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL); TF_CALL_bool(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL REGISTER_KERNEL_BUILDER(Name("Squeeze") .Device(DEVICE_GPU) .TypeConstraint<int32>("T") .HostMemory("input") .HostMemory("output"), SqueezeOp); #endif #define REGISTER_DEFAULT_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("Squeeze").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \ SqueezeOp); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL REGISTER_KERNEL_BUILDER(Name("Squeeze") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .HostMemory("input") .HostMemory("output"), SqueezeOp); class EnsureShapeOp : public OpKernel { public: explicit EnsureShapeOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("shape", &expected_shape_)); } void Compute(OpKernelContext* ctx) override { TensorShape shape; OP_REQUIRES_OK(ctx, shape_op_helpers::GetShape(ctx, 0, &shape)); if (!expected_shape_.IsCompatibleWith(shape)) { ctx->SetStatus(errors::InvalidArgument( "Shape of tensor ", this->def().input(0), " ", shape.DebugString(), " is not compatible with expected shape ", expected_shape_.DebugString(), ".")); } if (IsRefType(ctx->input_dtype(0))) { ctx->forward_ref_input_to_ref_output(0, 0); } else { ctx->set_output(0, ctx->input(0)); } } bool IsExpensive() override { return false; } private: PartialTensorShape expected_shape_; }; REGISTER_KERNEL_BUILDER(Name("EnsureShape").Device(DEVICE_CPU), EnsureShapeOp); #define REGISTER_DEVICE_KERNEL(type) \ 
REGISTER_KERNEL_BUILDER( \ Name("EnsureShape").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \ EnsureShapeOp) TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEVICE_KERNEL); REGISTER_DEVICE_KERNEL(Variant); #undef REGISTER_DEVICE_KERNEL #define REGISTER_DEVICE_HOST_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("EnsureShape") \ .Device(DEVICE_DEFAULT) \ .HostMemory("input") \ .HostMemory("output") \ .TypeConstraint<type>("T"), \ EnsureShapeOp) REGISTER_DEVICE_HOST_KERNEL(int32); REGISTER_DEVICE_HOST_KERNEL(bool); REGISTER_DEVICE_HOST_KERNEL(tstring); REGISTER_DEVICE_HOST_KERNEL(ResourceHandle); #undef REGISTER_DEVICE_HOST_KERNEL }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { static void BM_ExpandDims(::testing::benchmark::State& state) { Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_INT32, TensorShape({1, 1, 1, 1})); input.flat<int32>()(0) = 10; Tensor axis(DT_INT32, TensorShape({})); axis.flat<int32>()(0) = 2; Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "ExpandDims") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, axis)) .Attr("T", DT_INT32) .Attr("Tdim", DT_INT32) .Finalize(g, &node)); FixupSourceAndSinkEdges(g); test::Benchmark("cpu", g, nullptr, nullptr, nullptr, "SINGLE_THREADED_EXECUTOR", false) .Run(state); } BENCHMARK(BM_ExpandDims)->UseRealTime(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/shape_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/shape_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
03b2db7f-8707-47be-955c-5eccfd5e8710
cpp
tensorflow/tensorflow
bincount_op
tensorflow/compiler/tf2xla/kernels/bincount_op.cc
tensorflow/core/kernels/bincount_op_test.cc
#include <memory> #include <vector> #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/comparators.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_computation.h" #include "xla/shape_util.h" namespace tensorflow { namespace { class DenseBincountOp : public XlaOpKernel { public: explicit DenseBincountOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { (void)ctx->GetAttr("binary_output", &binary_output_); } private: bool binary_output_ = false; void Compile(XlaOpKernelContext* ctx) override { int64_t output_size; xla::XlaOp output_size_param = ctx->Input("size"); absl::StatusOr<xla::Shape> output_shape_or = ctx->builder()->GetShape(output_size_param); OP_REQUIRES_OK(ctx, output_shape_or.status()); auto output_shape_param = output_shape_or.value(); auto output_rank = output_shape_param.rank(); OP_REQUIRES(ctx, output_rank == 0, errors::InvalidArgument("Shape must be rank 0 but is rank ", output_rank)); OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar("size", &output_size)); OP_REQUIRES(ctx, output_size >= 0, errors::InvalidArgument("size (", output_size, ") must be non-negative")); xla::XlaOp idx, updates, output; xla::XlaOp input = ctx->Input(0); auto input_xla_type = ctx->input_xla_type(0); xla::PrimitiveType dtype = ctx->InputXlaType("weights"); auto zero = xla::Zero(ctx->builder(), dtype); auto one = xla::One(ctx->builder(), dtype); absl::StatusOr<xla::Shape> input_shape_or = ctx->builder()->GetShape(input); OP_REQUIRES_OK(ctx, input_shape_or.status()); auto input_shape = input_shape_or.value(); auto rank = input_shape.rank(); OP_REQUIRES(ctx, rank <= 2, errors::InvalidArgument( "Shape must be at most rank 2 but is rank ", rank)); xla::XlaOp weights = ctx->Input(2); absl::StatusOr<xla::Shape> 
weights_shape_or = ctx->builder()->GetShape(weights); OP_REQUIRES_OK(ctx, weights_shape_or.status()); auto weights_shape = weights_shape_or.value(); OP_REQUIRES(ctx, xla::ShapeUtil::CompatibleIgnoringElementType(weights_shape, input_shape) || (weights_shape.dimensions_size() > 0 && weights_shape.dimensions(0) == 0), errors::InvalidArgument( "`weights` must be the same shape as `arr` or a length-0 " "`Tensor`, in which case it acts as all weights equal to " "1. Received ", weights_shape.DebugString())); auto size = input_shape.dimensions(0); if (!size) { output = xla::Broadcast(zero, {output_size}); ctx->SetOutput(0, output); return; } auto weights_size = weights_shape.dimensions(0); bool has_weights = false; if (weights_size) { has_weights = true; } xla::Shape output_shape = xla::ShapeUtil::MakeShape(dtype, {output_size}); xla::ScatterDimensionNumbers scatter_dnums; scatter_dnums.set_index_vector_dim(1); scatter_dnums.add_inserted_window_dims(0); scatter_dnums.add_scatter_dims_to_operand_dims(0); if (rank == 2) { output_shape = xla::ShapeUtil::MakeShape(dtype, {size, output_size}); scatter_dnums.add_inserted_window_dims(1); scatter_dnums.add_scatter_dims_to_operand_dims(1); auto i_shape = xla::ShapeUtil::MakeShape(input_xla_type, {input_shape.dimensions()}); auto i = xla::Iota(ctx->builder(), i_shape, 0); i = xla::Reshape( i, {input_shape.dimensions(0) * input_shape.dimensions(1), 1}); auto j = xla::Reshape( input, {input_shape.dimensions(0) * input_shape.dimensions(1), 1}); std::vector<xla::XlaOp> iotas_to_concat; iotas_to_concat.push_back(i); iotas_to_concat.push_back(j); idx = xla::ConcatInDim(ctx->builder(), iotas_to_concat, 1); updates = xla::Broadcast( one, {input_shape.dimensions(0) * input_shape.dimensions(1)}); output = xla::Broadcast( zero, {output_shape.dimensions(0), output_shape.dimensions(1)}); if (has_weights && !binary_output_) { weights = xla::Reshape( weights, {input_shape.dimensions(0) * input_shape.dimensions(1)}); updates = weights; } } else { 
input = xla::Reshape(input, {size, 1}); idx = xla::Reshape(input, {size, 1}); updates = xla::Broadcast(one, {size}); output = xla::Broadcast(zero, {output_size}); if (has_weights && !binary_output_) { updates = weights; } } xla::XlaComputation assn_computation = [&] { std::unique_ptr<xla::XlaBuilder> subb = ctx->builder()->CreateSubBuilder("scatter_bincount"); xla::Shape param_shape = xla::ShapeUtil::MakeShape(dtype, {}); auto p0 = xla::Parameter(subb.get(), 0, param_shape, "p0"); auto p1 = xla::Parameter(subb.get(), 1, param_shape, "p1"); if (!binary_output_) { xla::Add(p0, p1); } return subb->BuildAndNoteError(); }(); output = xla::Scatter(output, idx, updates, assn_computation, scatter_dnums, false, false); ctx->SetOutput(0, output); } }; REGISTER_XLA_OP(Name("DenseBincount").CompileTimeConstantInput("size"), DenseBincountOp); REGISTER_XLA_OP(Name("Bincount").CompileTimeConstantInput("size"), DenseBincountOp); } }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* Bincount(int arr_size, int nbins) { Graph* g = new Graph(OpRegistry::Global()); Tensor arr(DT_INT32, TensorShape({arr_size})); arr.flat<int32>() = arr.flat<int32>().setRandom().abs(); Tensor size(DT_INT32, TensorShape({static_cast<int32>(1)})); size.flat<int32>()(0) = static_cast<int32>(nbins); Tensor weights(DT_INT32, TensorShape({0})); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Bincount") .Input(test::graph::Constant(g, arr)) .Input(test::graph::Constant(g, size)) .Input(test::graph::Constant(g, weights)) .Attr("T", DT_INT32) .Finalize(g, &node)); return g; } #define BM_BincountDev(K, NBINS, type) \ static void BM_Bincount##_##type##_##K##_##NBINS( \ ::testing::benchmark::State& state) { \ test::Benchmark(#type, Bincount(K * 1024, NBINS), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * K * \ 1024); \ } \ BENCHMARK(BM_Bincount##_##type##_##K##_##NBINS); BM_BincountDev(32, 1000, cpu); BM_BincountDev(32, 2000, cpu); BM_BincountDev(32, 5000, cpu); BM_BincountDev(64, 1000, cpu); BM_BincountDev(64, 2000, cpu); BM_BincountDev(64, 5000, cpu); BM_BincountDev(128, 1000, cpu); BM_BincountDev(128, 2000, cpu); BM_BincountDev(128, 5000, cpu); BM_BincountDev(32, 1000, gpu); BM_BincountDev(32, 2000, gpu); BM_BincountDev(32, 5000, gpu); BM_BincountDev(64, 1000, gpu); BM_BincountDev(64, 2000, gpu); BM_BincountDev(64, 5000, gpu); BM_BincountDev(128, 1000, gpu); BM_BincountDev(128, 2000, gpu); BM_BincountDev(128, 5000, gpu); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/bincount_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/bincount_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
13d35957-77ef-4355-a5a8-1cecf402d99c
cpp
tensorflow/tensorflow
spectrogram
tensorflow/lite/kernels/internal/spectrogram.cc
tensorflow/core/kernels/spectrogram_test.cc
#include "tensorflow/lite/kernels/internal/spectrogram.h" #include <assert.h> #include <math.h> #include <stdint.h> #include "third_party/fft2d/fft.h" namespace tflite { namespace internal { using std::complex; namespace { void GetPeriodicHann(int window_length, std::vector<double>* window) { const double pi = std::atan(1.0) * 4.0; window->resize(window_length); for (int i = 0; i < window_length; ++i) { (*window)[i] = 0.5 - 0.5 * cos((2.0 * pi * i) / window_length); } } } bool Spectrogram::Initialize(int window_length, int step_length) { std::vector<double> window; GetPeriodicHann(window_length, &window); return Initialize(window, step_length); } inline int Log2Floor(uint32_t n) { if (n == 0) return -1; int log = 0; uint32_t value = n; for (int i = 4; i >= 0; --i) { int shift = (1 << i); uint32_t x = value >> shift; if (x != 0) { value = x; log += shift; } } return log; } inline int Log2Ceiling(uint32_t n) { int floor = Log2Floor(n); if (n == (n & ~(n - 1))) return floor; else return floor + 1; } inline uint32_t NextPowerOfTwo(uint32_t value) { int exponent = Log2Ceiling(value); return 1 << exponent; } bool Spectrogram::Initialize(const std::vector<double>& window, int step_length) { window_length_ = window.size(); window_ = window; if (window_length_ < 2) { initialized_ = false; return false; } step_length_ = step_length; if (step_length_ < 1) { initialized_ = false; return false; } fft_length_ = NextPowerOfTwo(window_length_); output_frequency_channels_ = 1 + fft_length_ / 2; fft_input_output_.assign(fft_length_ + 2, 0.0); int half_fft_length = fft_length_ / 2; fft_double_working_area_.assign(half_fft_length, 0.0); fft_integer_working_area_.assign(2 + static_cast<int>(sqrt(half_fft_length)), 0); fft_integer_working_area_[0] = 0; input_queue_.clear(); samples_to_next_step_ = window_length_; initialized_ = true; return true; } template <class InputSample, class OutputSample> bool Spectrogram::ComputeComplexSpectrogram( const std::vector<InputSample>& input, 
std::vector<std::vector<complex<OutputSample>>>* output) { if (!initialized_) { return false; } output->clear(); int input_start = 0; while (GetNextWindowOfSamples(input, &input_start)) { ProcessCoreFFT(); output->resize(output->size() + 1); auto& spectrogram_slice = output->back(); spectrogram_slice.resize(output_frequency_channels_); for (int i = 0; i < output_frequency_channels_; ++i) { spectrogram_slice[i] = complex<OutputSample>( fft_input_output_[2 * i], fft_input_output_[2 * i + 1]); } } return true; } template bool Spectrogram::ComputeComplexSpectrogram( const std::vector<float>& input, std::vector<std::vector<complex<float>>>*); template bool Spectrogram::ComputeComplexSpectrogram( const std::vector<double>& input, std::vector<std::vector<complex<float>>>*); template bool Spectrogram::ComputeComplexSpectrogram( const std::vector<float>& input, std::vector<std::vector<complex<double>>>*); template bool Spectrogram::ComputeComplexSpectrogram( const std::vector<double>& input, std::vector<std::vector<complex<double>>>*); template <class InputSample, class OutputSample> bool Spectrogram::ComputeSquaredMagnitudeSpectrogram( const std::vector<InputSample>& input, std::vector<std::vector<OutputSample>>* output) { if (!initialized_) { return false; } output->clear(); int input_start = 0; while (GetNextWindowOfSamples(input, &input_start)) { ProcessCoreFFT(); output->resize(output->size() + 1); auto& spectrogram_slice = output->back(); spectrogram_slice.resize(output_frequency_channels_); for (int i = 0; i < output_frequency_channels_; ++i) { const double re = fft_input_output_[2 * i]; const double im = fft_input_output_[2 * i + 1]; spectrogram_slice[i] = re * re + im * im; } } return true; } template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram( const std::vector<float>& input, std::vector<std::vector<float>>*); template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram( const std::vector<double>& input, std::vector<std::vector<float>>*); template bool 
Spectrogram::ComputeSquaredMagnitudeSpectrogram( const std::vector<float>& input, std::vector<std::vector<double>>*); template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram( const std::vector<double>& input, std::vector<std::vector<double>>*); template <class InputSample> bool Spectrogram::GetNextWindowOfSamples(const std::vector<InputSample>& input, int* input_start) { auto input_it = input.begin() + *input_start; int input_remaining = input.end() - input_it; if (samples_to_next_step_ > input_remaining) { input_queue_.insert(input_queue_.end(), input_it, input.end()); *input_start += input_remaining; samples_to_next_step_ -= input_remaining; return false; } else { input_queue_.insert(input_queue_.end(), input_it, input_it + samples_to_next_step_); *input_start += samples_to_next_step_; input_queue_.erase( input_queue_.begin(), input_queue_.begin() + input_queue_.size() - window_length_); samples_to_next_step_ = step_length_; return true; } } void Spectrogram::ProcessCoreFFT() { for (int j = 0; j < window_length_; ++j) { fft_input_output_[j] = input_queue_[j] * window_[j]; } for (int j = window_length_; j < fft_length_; ++j) { fft_input_output_[j] = 0.0; } const int kForwardFFT = 1; rdft(fft_length_, kForwardFFT, &fft_input_output_[0], &fft_integer_working_area_[0], &fft_double_working_area_[0]); fft_input_output_[fft_length_] = fft_input_output_[1]; fft_input_output_[fft_length_ + 1] = 0; fft_input_output_[1] = 0; } } }
#include "tensorflow/core/kernels/spectrogram.h" #include <complex> #include <vector> #include "tensorflow/core/kernels/spectrogram_test_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/resource_loader.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using ::std::complex; string InputFilename() { return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data", "short_test_segment.wav"); } string ExpectedFilename() { return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data", "short_test_segment_spectrogram.csv.bin"); } const int kDataVectorLength = 257; const int kNumberOfFramesInTestData = 178; string ExpectedNonPowerOfTwoFilename() { return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data", "short_test_segment_spectrogram_400_200.csv.bin"); } const int kNonPowerOfTwoDataVectorLength = 257; const int kNumberOfFramesInNonPowerOfTwoTestData = 228; TEST(SpectrogramTest, TooLittleDataYieldsNoFrames) { Spectrogram sgram; sgram.Initialize(400, 200); std::vector<double> input; SineWave(44100, 1000.0, 0.001, &input); EXPECT_EQ(44, input.size()); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(0, output.size()); } TEST(SpectrogramTest, StepSizeSmallerThanWindow) { Spectrogram sgram; EXPECT_TRUE(sgram.Initialize(400, 200)); std::vector<double> input; SineWave(44100, 1000.0, 0.015, &input); EXPECT_EQ(661, input.size()); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(2, output.size()); } TEST(SpectrogramTest, StepSizeBiggerThanWindow) { Spectrogram sgram; EXPECT_TRUE(sgram.Initialize(200, 400)); std::vector<double> input; SineWave(44100, 1000.0, 0.02, &input); EXPECT_EQ(882, input.size()); 
std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(2, output.size()); } TEST(SpectrogramTest, StepSizeBiggerThanWindow2) { Spectrogram sgram; EXPECT_TRUE(sgram.Initialize(200, 400)); std::vector<double> input; SineWave(44100, 1000.0, 0.016, &input); EXPECT_GT(input.size(), 600); EXPECT_LT(input.size(), 800); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(2, output.size()); } TEST(SpectrogramTest, MultipleCallsToComputeComplexSpectrogramMayYieldDifferentNumbersOfFrames) { Spectrogram sgram; sgram.Initialize(200, 400); std::vector<double> input; SineWave(44100, 1000.0, 0.02, &input); EXPECT_EQ(882, input.size()); std::vector<std::vector<complex<double>>> output; const std::vector<int> expected_output_sizes = { 2, 2, 3, }; for (int expected_output_size : expected_output_sizes) { sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(expected_output_size, output.size()); } } TEST(SpectrogramTest, CumulatingExcessInputsForOverlappingFrames) { Spectrogram sgram; sgram.Initialize(400, 200); std::vector<double> input; SineWave(44100, 1000.0, 0.02, &input); EXPECT_EQ(882, input.size()); std::vector<std::vector<complex<double>>> output; const std::vector<int> expected_output_sizes = { 3, 4, 5, }; for (int expected_output_size : expected_output_sizes) { sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(expected_output_size, output.size()); } } TEST(SpectrogramTest, StepSizeEqualToWindowWorks) { Spectrogram sgram; sgram.Initialize(200, 200); std::vector<double> input; SineWave(44100, 1000.0, 0.05, &input); EXPECT_EQ(2205, input.size()); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); EXPECT_EQ(11, output.size()); } template <class ExpectedSample, class ActualSample> void CompareComplexData( const std::vector<std::vector<complex<ExpectedSample>>>& expected, const 
std::vector<std::vector<complex<ActualSample>>>& actual, double tolerance) { ASSERT_EQ(actual.size(), expected.size()); for (int i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].size(), actual[i].size()); for (int j = 0; j < expected[i].size(); ++j) { ASSERT_NEAR(real(expected[i][j]), real(actual[i][j]), tolerance) << ": where i=" << i << " and j=" << j << "."; ASSERT_NEAR(imag(expected[i][j]), imag(actual[i][j]), tolerance) << ": where i=" << i << " and j=" << j << "."; } } } template <class Sample> double GetMaximumAbsolute(const std::vector<std::vector<Sample>>& spectrogram) { double max_absolute = 0.0; for (int i = 0; i < spectrogram.size(); ++i) { for (int j = 0; j < spectrogram[i].size(); ++j) { double absolute_value = std::abs(spectrogram[i][j]); if (absolute_value > max_absolute) { max_absolute = absolute_value; } } } return max_absolute; } template <class ExpectedSample, class ActualSample> void CompareMagnitudeData( const std::vector<std::vector<complex<ExpectedSample>>>& expected_complex_output, const std::vector<std::vector<ActualSample>>& actual_squared_magnitude, double tolerance) { ASSERT_EQ(actual_squared_magnitude.size(), expected_complex_output.size()); for (int i = 0; i < expected_complex_output.size(); ++i) { ASSERT_EQ(expected_complex_output[i].size(), actual_squared_magnitude[i].size()); for (int j = 0; j < expected_complex_output[i].size(); ++j) { ASSERT_NEAR(norm(expected_complex_output[i][j]), actual_squared_magnitude[i][j], tolerance) << ": where i=" << i << " and j=" << j << "."; } } } TEST(SpectrogramTest, ReInitializationWorks) { Spectrogram sgram; sgram.Initialize(512, 256); std::vector<double> input; CHECK( ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input)); std::vector<std::vector<complex<double>>> first_output; std::vector<std::vector<complex<double>>> second_output; sgram.Initialize(512, 256); sgram.ComputeComplexSpectrogram(input, &first_output); sgram.Initialize(512, 256); 
sgram.ComputeComplexSpectrogram(input, &second_output); ASSERT_EQ(first_output.size(), second_output.size()); int slice_size = first_output[0].size(); for (int i = 0; i < first_output.size(); ++i) { ASSERT_EQ(slice_size, first_output[i].size()); ASSERT_EQ(slice_size, second_output[i].size()); for (int j = 0; j < slice_size; ++j) { ASSERT_EQ(first_output[i][j], second_output[i][j]); } } } TEST(SpectrogramTest, ComputedComplexDataAgreeWithMatlab) { const int kInputDataLength = 45870; Spectrogram sgram; sgram.Initialize(512, 256); std::vector<double> input; CHECK( ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input)); EXPECT_EQ(kInputDataLength, input.size()); std::vector<std::vector<complex<double>>> expected_output; ASSERT_TRUE(ReadRawFloatFileToComplexVector( GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength, &expected_output)); EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size()); EXPECT_EQ(kDataVectorLength, expected_output[0].size()); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); CompareComplexData(expected_output, output, 1e-5); } TEST(SpectrogramTest, ComputedFloatComplexDataAgreeWithMatlab) { const int kInputDataLength = 45870; Spectrogram sgram; sgram.Initialize(512, 256); std::vector<double> double_input; CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &double_input)); std::vector<float> input; input.assign(double_input.begin(), double_input.end()); EXPECT_EQ(kInputDataLength, input.size()); std::vector<std::vector<complex<double>>> expected_output; ASSERT_TRUE(ReadRawFloatFileToComplexVector( GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength, &expected_output)); EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size()); EXPECT_EQ(kDataVectorLength, expected_output[0].size()); std::vector<std::vector<complex<float>>> output; sgram.ComputeComplexSpectrogram(input, &output); CompareComplexData(expected_output, output, 1e-4); 
} TEST(SpectrogramTest, ComputedSquaredMagnitudeDataAgreeWithMatlab) { const int kInputDataLength = 45870; Spectrogram sgram; sgram.Initialize(512, 256); std::vector<double> input; CHECK( ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input)); EXPECT_EQ(kInputDataLength, input.size()); std::vector<std::vector<complex<double>>> expected_output; ASSERT_TRUE(ReadRawFloatFileToComplexVector( GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength, &expected_output)); EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size()); EXPECT_EQ(kDataVectorLength, expected_output[0].size()); std::vector<std::vector<double>> output; sgram.ComputeSquaredMagnitudeSpectrogram(input, &output); CompareMagnitudeData(expected_output, output, 1e-3); } TEST(SpectrogramTest, ComputedFloatSquaredMagnitudeDataAgreeWithMatlab) { const int kInputDataLength = 45870; Spectrogram sgram; sgram.Initialize(512, 256); std::vector<double> double_input; CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &double_input)); EXPECT_EQ(kInputDataLength, double_input.size()); std::vector<float> input; input.assign(double_input.begin(), double_input.end()); std::vector<std::vector<complex<double>>> expected_output; ASSERT_TRUE(ReadRawFloatFileToComplexVector( GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength, &expected_output)); EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size()); EXPECT_EQ(kDataVectorLength, expected_output[0].size()); std::vector<std::vector<float>> output; sgram.ComputeSquaredMagnitudeSpectrogram(input, &output); double max_absolute = GetMaximumAbsolute(output); EXPECT_GT(max_absolute, 2300.0); CompareMagnitudeData(expected_output, output, 2e-4); } TEST(SpectrogramTest, ComputedNonPowerOfTwoComplexDataAgreeWithMatlab) { const int kInputDataLength = 45870; Spectrogram sgram; sgram.Initialize(400, 200); std::vector<double> input; CHECK( ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input)); 
EXPECT_EQ(kInputDataLength, input.size()); std::vector<std::vector<complex<double>>> expected_output; ASSERT_TRUE(ReadRawFloatFileToComplexVector( GetDataDependencyFilepath(ExpectedNonPowerOfTwoFilename()), kNonPowerOfTwoDataVectorLength, &expected_output)); EXPECT_EQ(kNumberOfFramesInNonPowerOfTwoTestData, expected_output.size()); EXPECT_EQ(kNonPowerOfTwoDataVectorLength, expected_output[0].size()); std::vector<std::vector<complex<double>>> output; sgram.ComputeComplexSpectrogram(input, &output); CompareComplexData(expected_output, output, 1e-5); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/spectrogram.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ba8d96ad-15f5-4e3a-b66c-9871fe9caa51
cpp
tensorflow/tensorflow
mfcc
tensorflow/lite/kernels/internal/mfcc.cc
tensorflow/lite/kernels/mfcc_test.cc
#include <math.h> #include "tensorflow/lite/kernels/internal/mfcc.h" namespace tflite { namespace internal { const double kDefaultUpperFrequencyLimit = 4000; const double kDefaultLowerFrequencyLimit = 20; const double kFilterbankFloor = 1e-12; const int kDefaultFilterbankChannelCount = 40; const int kDefaultDCTCoefficientCount = 13; Mfcc::Mfcc() : initialized_(false), lower_frequency_limit_(kDefaultLowerFrequencyLimit), upper_frequency_limit_(kDefaultUpperFrequencyLimit), filterbank_channel_count_(kDefaultFilterbankChannelCount), dct_coefficient_count_(kDefaultDCTCoefficientCount) {} bool Mfcc::Initialize(int input_length, double input_sample_rate) { bool initialized = mel_filterbank_.Initialize( input_length, input_sample_rate, filterbank_channel_count_, lower_frequency_limit_, upper_frequency_limit_); initialized &= dct_.Initialize(filterbank_channel_count_, dct_coefficient_count_); initialized_ = initialized; return initialized; } void Mfcc::Compute(const std::vector<double>& spectrogram_frame, std::vector<double>* output) const { if (!initialized_) { return; } std::vector<double> working; mel_filterbank_.Compute(spectrogram_frame, &working); for (int i = 0; i < working.size(); ++i) { double val = working[i]; if (val < kFilterbankFloor) { val = kFilterbankFloor; } working[i] = log(val); } dct_.Compute(working, output); } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "flatbuffers/flexbuffers.h" #include "tensorflow/lite/core/interpreter.h" #include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { namespace ops { namespace custom { TfLiteRegistration* Register_MFCC(); namespace { using ::testing::ElementsAre; using ::testing::ElementsAreArray; class BaseMfccOpModel : public SingleOpModel { public: BaseMfccOpModel(const TensorData& input1, const TensorData& input2, const TensorData& output) { input1_ = AddInput(input1); input2_ = AddInput(input2); output_ = AddOutput(output); flexbuffers::Builder fbb; fbb.Map([&]() { fbb.Int("upper_frequency_limit", 4000); fbb.Int("lower_frequency_limit", 20); fbb.Int("filterbank_channel_count", 40); fbb.Int("dct_coefficient_count", 13); }); fbb.Finish(); SetCustomOp("Mfcc", fbb.GetBuffer(), Register_MFCC); BuildInterpreter({GetShape(input1_), GetShape(input2_)}); } int input1() { return input1_; } int input2() { return input2_; } std::vector<float> GetOutput() { return ExtractVector<float>(output_); } std::vector<int> GetOutputShape() { return GetTensorShape(output_); } protected: int input1_; int input2_; int output_; }; TEST(MfccOpTest, SimpleTest) { BaseMfccOpModel m({TensorType_FLOAT32, {1, 1, 513}}, {TensorType_INT32, {1}}, {TensorType_FLOAT32, {}}); std::vector<float> data(513); for (int i = 0; i < data.size(); ++i) { data[i] = i + 1; } m.PopulateTensor<float>(m.input1(), 0, data.data(), data.data() + data.size()); m.PopulateTensor<int>(m.input2(), {22050}); ASSERT_EQ(m.Invoke(), kTfLiteOk); std::vector<int> output_shape = m.GetOutputShape(); EXPECT_THAT(output_shape, ElementsAre(1, 1, 13)); EXPECT_THAT( m.GetOutput(), ElementsAreArray(ArrayFloatNear( {29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878, -0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029, -0.0769791, -0.10806114, -0.06047613}, 1e-3))); } TEST(MfccOpTest, 
ScalarInputRateTest) { BaseMfccOpModel m({TensorType_FLOAT32, {1, 1, 513}}, {TensorType_INT32, {}}, {TensorType_FLOAT32, {}}); std::vector<float> data(513); for (int i = 0; i < data.size(); ++i) { data[i] = i + 1; } m.PopulateTensor<float>(m.input1(), 0, data.data(), data.data() + data.size()); m.PopulateTensor<int>(m.input2(), {22050}); ASSERT_EQ(m.Invoke(), kTfLiteOk); std::vector<int> output_shape = m.GetOutputShape(); EXPECT_THAT(output_shape, ElementsAre(1, 1, 13)); EXPECT_THAT( m.GetOutput(), ElementsAreArray(ArrayFloatNear( {29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878, -0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029, -0.0769791, -0.10806114, -0.06047613}, 1e-3))); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/mfcc_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b1e2c9fb-4420-4842-9979-8e81930a9816
cpp
tensorflow/tensorflow
sparse_tensor_dense_matmul_op
tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
tensorflow/core/kernels/sparse_tensor_dense_matmul_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/sparse_tensor_dense_matmul_op.h" #include "Eigen/Core" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/platform/bfloat16.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T, typename Tindices> class SparseTensorDenseMatMulOp : public OpKernel { public: explicit SparseTensorDenseMatMulOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint_a", &adjoint_a_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint_b", &adjoint_b_)); } void Compute(OpKernelContext* ctx) override { const Tensor* a_indices; const Tensor* a_values; const Tensor* a_shape; const Tensor* b; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values)); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b", &b)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b->shape()), errors::InvalidArgument("Tensor 'b' is not a matrix")); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()), errors::InvalidArgument("Tensor 'a_shape' is not a vector")); OP_REQUIRES( ctx, a_shape->NumElements() == 2, errors::InvalidArgument("Tensor 'a_shape' must have 2 elements")); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values->shape()), errors::InvalidArgument("Tensor 'a_values' is not a vector")); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()), errors::InvalidArgument("Tensor 'a_indices' is not a matrix")); const int64_t nnz = a_indices->shape().dim_size(0); OP_REQUIRES(ctx, nnz == a_values->NumElements(), errors::InvalidArgument("Number of rows of a_indices does not " "match number of entries in a_values")); OP_REQUIRES( ctx, a_indices->shape().dim_size(1) == 
a_shape->NumElements(), errors::InvalidArgument("Number of columns of a_indices does not match " "number of entries in a_shape")); auto a_shape_t = a_shape->vec<int64_t>(); const int64_t outer_left = (adjoint_a_) ? a_shape_t(1) : a_shape_t(0); const int64_t outer_right = (adjoint_b_) ? b->shape().dim_size(0) : b->shape().dim_size(1); const int64_t inner_left = (adjoint_a_) ? a_shape_t(0) : a_shape_t(1); const int64_t inner_right = (adjoint_b_) ? b->shape().dim_size(1) : b->shape().dim_size(0); OP_REQUIRES( ctx, inner_right == inner_left, errors::InvalidArgument( "Cannot multiply A and B because inner dimension does not match: ", inner_left, " vs. ", inner_right, ". Did you forget a transpose? " "Dimensions of A: [", a_shape_t(0), ", ", a_shape_t(1), "). Dimensions of B: ", b->shape().DebugString())); if (std::is_same<Device, GPUDevice>::value) { const int int32max = std::numeric_limits<int>::max(); OP_REQUIRES( ctx, (FastBoundsCheck(inner_left, int32max) && FastBoundsCheck(inner_right, int32max) && FastBoundsCheck(outer_left, int32max) && FastBoundsCheck(outer_right, int32max) && FastBoundsCheck(b->NumElements(), int32max) && FastBoundsCheck(outer_left * outer_right, int32max) && FastBoundsCheck(a_values->NumElements(), int32max)), errors::InvalidArgument("Cannot use GPU for > 2^31 entry inputs")); OP_REQUIRES(ctx, FastBoundsCheck(nnz * outer_right, int32max), errors::InvalidArgument( "Cannot use GPU when output.shape[1] * nnz(a) > 2^31")); } TensorShape out_shape({outer_left, outer_right}); Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out)); if (out->NumElements() == 0) { return; } if (a_values->NumElements() == 0 || b->NumElements() == 0) { functor::SetZeroFunctor<Device, T> f; f(ctx->eigen_device<Device>(), out->flat<T>()); return; } #define MAYBE_ADJOINT(ADJ_A, ADJ_B) \ if (adjoint_a_ == ADJ_A && adjoint_b_ == ADJ_B) { \ Status functor_status = functor::SparseTensorDenseMatMulFunctor< \ Device, T, Tindices, ADJ_A, \ 
ADJ_B>::Compute(ctx, out->matrix<T>(), a_indices->matrix<Tindices>(), \ a_values->vec<T>(), b->matrix<T>()); \ OP_REQUIRES_OK(ctx, functor_status); \ } MAYBE_ADJOINT(false, false); MAYBE_ADJOINT(false, true); MAYBE_ADJOINT(true, false); MAYBE_ADJOINT(true, true); #undef MAYBE_ADJOINT } private: bool adjoint_a_; bool adjoint_b_; }; #define REGISTER_CPU(TypeT, TypeIndex) \ REGISTER_KERNEL_BUILDER( \ Name("SparseTensorDenseMatMul") \ .Device(DEVICE_CPU) \ .TypeConstraint<TypeT>("T") \ .TypeConstraint<TypeIndex>("Tindices") \ .HostMemory("a_shape"), \ SparseTensorDenseMatMulOp<CPUDevice, TypeT, TypeIndex>); #define REGISTER_KERNELS_CPU(T) \ REGISTER_CPU(T, int64_t); \ REGISTER_CPU(T, int32) REGISTER_KERNELS_CPU(Eigen::half); REGISTER_KERNELS_CPU(float); REGISTER_KERNELS_CPU(double); REGISTER_KERNELS_CPU(int32); REGISTER_KERNELS_CPU(complex64); REGISTER_KERNELS_CPU(complex128); REGISTER_KERNELS_CPU(bfloat16); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace functor { #define DECLARE_GPU_SPEC(T, Tindices, ADJ_A, ADJ_B) \ template <> \ Status SparseTensorDenseMatMulFunctor< \ GPUDevice, T, Tindices, ADJ_A, \ ADJ_B>::Compute(OpKernelContext* ctx, typename TTypes<T>::Matrix out, \ TTypes<Tindices>::ConstMatrix a_indices, \ typename TTypes<T>::ConstVec a_values, \ typename TTypes<T>::ConstMatrix b); \ extern template struct SparseTensorDenseMatMulFunctor< \ GPUDevice, T, Tindices, ADJ_A, ADJ_B>; #define REGISTER_GPU_SPEC(T, ADJ_A, ADJ_B) \ DECLARE_GPU_SPEC(T, int32, ADJ_A, ADJ_B); \ DECLARE_GPU_SPEC(T, int64_t, ADJ_A, ADJ_B) #define DECLARE_ADJOINT_GPU_SPEC(T) \ REGISTER_GPU_SPEC(T, false, false) \ REGISTER_GPU_SPEC(T, false, true) \ REGISTER_GPU_SPEC(T, true, false) \ REGISTER_GPU_SPEC(T, true, true) DECLARE_ADJOINT_GPU_SPEC(Eigen::half); DECLARE_ADJOINT_GPU_SPEC(float); DECLARE_ADJOINT_GPU_SPEC(double); DECLARE_ADJOINT_GPU_SPEC(complex64); DECLARE_ADJOINT_GPU_SPEC(complex128); #undef DECLARE_ADJOINT_GPU_SPEC #undef DECLARE_GPU_SPEC #undef REGISTER_GPU_SPEC } #define 
REGISTER_GPU(TypeT, TypeIndex) \ REGISTER_KERNEL_BUILDER( \ Name("SparseTensorDenseMatMul") \ .Device(DEVICE_GPU) \ .TypeConstraint<TypeT>("T") \ .TypeConstraint<TypeIndex>("Tindices") \ .HostMemory("a_shape"), \ SparseTensorDenseMatMulOp<GPUDevice, TypeT, TypeIndex>); #define REGISTER_KERNELS_GPU(T) \ REGISTER_GPU(T, int64_t); \ REGISTER_GPU(T, int32) REGISTER_KERNELS_GPU(Eigen::half); REGISTER_KERNELS_GPU(float); REGISTER_KERNELS_GPU(double); REGISTER_KERNELS_GPU(complex64); REGISTER_KERNELS_GPU(complex128); #undef REGISTER_GPU #undef REGISTER_KERNELS_GPU #endif namespace functor { namespace { Status KOutOfBoundsError(int64_t k, std::size_t i, int rhs_index_a, std::size_t lhs_right) { return errors::InvalidArgument("k (", k, ") from index[", i, ",", rhs_index_a, "] out of bounds (>=", lhs_right, ")"); } Status MOutOfBoundsError(int64_t m, std::size_t i, int lhs_index_a, int64_t out_dim0) { return errors::InvalidArgument("m (", m, ") from index[", i, ",", lhs_index_a, "] out of bounds (>=", out_dim0, ")"); } template <typename T, typename Tsum, typename Tindices, bool ADJ_A, bool ADJ_B> Status SparseTensorDenseMatMulImpl( typename TTypes<Tsum>::Matrix out, typename TTypes<Tindices>::ConstMatrix a_indices, typename TTypes<T>::ConstVec a_values, typename TTypes<T>::ConstMatrix b) { static constexpr std::size_t kNumVectorize = 32; const std::size_t nnz = a_values.size(); const std::size_t rhs_right = (ADJ_B ? b.dimension(0) : b.dimension(1)); const std::size_t lhs_right = (ADJ_B ? b.dimension(1) : b.dimension(0)); const int lhs_index_a = ADJ_A ? 1 : 0; const int rhs_index_a = ADJ_A ? 
0 : 1; if (rhs_right < kNumVectorize) { auto maybe_adjoint_b = MaybeAdjoint<decltype(b), ADJ_B>(b); for (std::size_t i = 0; i < nnz; ++i) { const Tindices m = internal::SubtleMustCopy(a_indices(i, lhs_index_a)); const Tindices k = internal::SubtleMustCopy(a_indices(i, rhs_index_a)); if (!FastBoundsCheck(k, lhs_right)) { return KOutOfBoundsError(k, i, rhs_index_a, lhs_right); } if (!FastBoundsCheck(m, out.dimension(0))) { return MOutOfBoundsError(m, i, lhs_index_a, out.dimension(0)); } const T a_value = ADJ_A ? MaybeConj(a_values(i)) : a_values(i); for (std::size_t n = 0; n < rhs_right; ++n) { const T b_value = maybe_adjoint_b(k, n); out(m, n) += static_cast<Tsum>(a_value) * static_cast<Tsum>(b_value); } } } else { const int b_chip_index = ADJ_B ? 1 : 0; #define LOOP_NNZ(b_passed) \ for (std::size_t i = 0; i < nnz; ++i) { \ const Tindices m = internal::SubtleMustCopy(a_indices(i, lhs_index_a)); \ const Tindices k = internal::SubtleMustCopy(a_indices(i, rhs_index_a)); \ const T a_value = (ADJ_A) ? 
MaybeConj(a_values(i)) : a_values(i); \ if (!FastBoundsCheck(k, lhs_right)) { \ return KOutOfBoundsError(k, i, rhs_index_a, lhs_right); \ } \ if (!FastBoundsCheck(m, out.dimension(0))) { \ return MOutOfBoundsError(m, i, lhs_index_a, out.dimension(0)); \ } \ out.template chip<0>(m) += \ b_passed.template chip<b_chip_index>(k).template cast<Tsum>() * \ static_cast<Tsum>(a_value); \ } if (ADJ_B) { Eigen::array<int, 2> shuffle{1, 0}; Eigen::Tensor<T, 2, Eigen::ColMajor> col_major_conj_b = b.swap_layout().shuffle(shuffle).conjugate(); LOOP_NNZ(col_major_conj_b); } else { LOOP_NNZ(b); } #undef LOOP_NNZ } return absl::OkStatus(); } } template <typename T, typename Tindices, bool ADJ_A, bool ADJ_B> struct SparseTensorDenseMatMulFunctor<CPUDevice, T, Tindices, ADJ_A, ADJ_B> { static Status Compute(OpKernelContext* ctx, typename TTypes<T>::Matrix out, typename TTypes<Tindices>::ConstMatrix a_indices, typename TTypes<T>::ConstVec a_values, typename TTypes<T>::ConstMatrix b) { using Tsum = typename SumType<T>::type; Tensor temp_out_t; if (!std::is_same<T, Tsum>::value) { TF_RETURN_IF_ERROR(ctx->allocate_temp( DataTypeToEnum<Tsum>::value, TensorShape({out.dimension(0), out.dimension(1)}), &temp_out_t)); auto temp_out = temp_out_t.matrix<Tsum>(); temp_out.setZero(); TF_RETURN_IF_ERROR( SparseTensorDenseMatMulImpl<T, Tsum, Tindices, ADJ_A, ADJ_B>( temp_out, a_indices, a_values, b)); out = temp_out.template cast<T>(); } else { out.setZero(); auto out_workaround = *reinterpret_cast<typename TTypes<Tsum>::Matrix*>(&out); TF_RETURN_IF_ERROR( SparseTensorDenseMatMulImpl<T, Tsum, Tindices, ADJ_A, ADJ_B>( out_workaround, a_indices, a_values, b)); } return absl::OkStatus(); } }; } }
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { Node* SparseTensorDenseMatMulNode(Graph* g, Node* a_indices, Node* a_values, Node* a_shape, Node* b, bool adjoint_a, bool adjoint_b) { Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseTensorDenseMatMul") .Input(a_indices) .Input(a_values) .Input(a_shape) .Input(b) .Attr("T", DT_FLOAT) .Attr("adjoint_a", adjoint_a) .Attr("adjoint_b", adjoint_b) .Finalize(g, &ret)); return ret; } static Graph* SparseTensorDenseMatmul(int nnz, int m, int k, int n, bool adjoint_a, bool adjoint_b) { Graph* g = new Graph(OpRegistry::Global()); Tensor a_values(DT_FLOAT, TensorShape({nnz})); Tensor a_indices(DT_INT64, TensorShape({nnz, 2})); Tensor a_shape(DT_INT64, TensorShape({2})); auto a_shape_t = a_shape.vec<int64_t>(); a_shape_t(0) = adjoint_a ? k : m; a_shape_t(1) = adjoint_a ? m : k; a_values.flat<float>().setRandom(); auto a_indices_t = a_indices.matrix<int64_t>(); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> a_lhs_dist(0, a_shape_t(0) - 1); std::uniform_int_distribution<> a_rhs_dist(0, a_shape_t(1) - 1); for (int32_t i = 0; i < nnz; ++i) { a_indices_t(i, 0) = a_lhs_dist(gen); a_indices_t(i, 1) = a_rhs_dist(gen); } Tensor b(DT_FLOAT, adjoint_b ? 
TensorShape({n, k}) : TensorShape({k, n})); b.flat<float>().setRandom(); SparseTensorDenseMatMulNode( g, test::graph::Constant(g, a_indices), test::graph::Constant(g, a_values), test::graph::HostConstant(g, a_shape), test::graph::Constant(g, b), adjoint_a, adjoint_b); return g; } #define BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, DEVICE) \ static void \ BM_SparseTensorDenseMatmul##_##NNZ##_##M##_##K##_##N##_##TA##_##TB##_##DEVICE( \ ::testing::benchmark::State& state) { \ int64_t items_per_iter = (static_cast<int64_t>(NNZ) * (TB ? K : N)); \ test::Benchmark(#DEVICE, SparseTensorDenseMatmul(NNZ, M, K, N, TA, TB), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * items_per_iter); \ state.SetBytesProcessed(state.iterations() * items_per_iter * \ sizeof(float)); \ } \ BENCHMARK( \ BM_SparseTensorDenseMatmul##_##NNZ##_##M##_##K##_##N##_##TA##_##TB##_##DEVICE); #define BM_SparseTensorDenseMatmul(NNZ, M, K, N, TA, TB) \ BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, cpu); \ BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, gpu); BM_SparseTensorDenseMatmul(128, 8, 512, 1, false, false); BM_SparseTensorDenseMatmul(128, 16, 512, 1, false, false); BM_SparseTensorDenseMatmul(128, 128, 512, 1, false, false); BM_SparseTensorDenseMatmul(128, 4096, 4096, 1, false, false); BM_SparseTensorDenseMatmul(1024, 4096, 4096, 1, false, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 1, false, false); BM_SparseTensorDenseMatmul(128, 8, 1024, 16, false, false); BM_SparseTensorDenseMatmul(128, 16, 1024, 16, false, false); BM_SparseTensorDenseMatmul(128, 128, 1024, 16, false, false); BM_SparseTensorDenseMatmul(128, 4096, 4096, 128, false, false); BM_SparseTensorDenseMatmul(128, 4096, 4096, 1024, false, false); BM_SparseTensorDenseMatmul(1024, 8, 1024, 16, false, false); BM_SparseTensorDenseMatmul(1024, 16, 1024, 16, false, false); BM_SparseTensorDenseMatmul(1024, 128, 1024, 16, false, false); BM_SparseTensorDenseMatmul(1024, 4096, 4096, 128, false, 
false); BM_SparseTensorDenseMatmul(1024, 4096, 4096, 1024, false, false); BM_SparseTensorDenseMatmul(16384, 8, 1024, 16, false, false); BM_SparseTensorDenseMatmul(16384, 16, 1024, 16, false, false); BM_SparseTensorDenseMatmul(16384, 128, 1024, 16, false, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 128, false, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 1024, false, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, false, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, false, true); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, true, false); BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, true, true); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_tensor_dense_matmul_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f33bd101-d6e4-43c2-8486-407ea91aac0e
cpp
tensorflow/tensorflow
sparse_fill_empty_rows_op
tensorflow/core/kernels/sparse_fill_empty_rows_op.cc
tensorflow/core/kernels/sparse_fill_empty_rows_op_test.cc
#define EIGEN_USE_THREADS #include <algorithm> #include <numeric> #include <unordered_map> #include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/fill_empty_rows_functor.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; namespace { template <typename Device, typename T, typename Tindex> void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, indices_t.dim_size(0) == values_t.dim_size(0), errors::InvalidArgument("The length of `values` (", values_t.dim_size(0), ") must 
match the first dimension of `indices` (", indices_t.dim_size(0), ")."), done); OP_REQUIRES_ASYNC( context, indices_t.dim_size(1) == dense_shape_t.dim_size(0), errors::InvalidArgument("The length of `dense_shape` (", dense_shape_t.dim_size(0), ") must match the second dimension of `indices` ", "(", indices_t.dim_size(1), ")."), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty."), done); using FunctorType = functor::FillEmptyRows<Device, T, Tindex, false>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); } } template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsOp : public OpKernel { public: explicit SparseFillEmptyRowsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { SparseFillEmptyRowsOpImpl<Device, T, Tindex>(context); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \ .Device(DEVICE_##D) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T, typename Tindex> class SparseFillEmptyRowsGPUOp : public AsyncOpKernel { public: explicit SparseFillEmptyRowsGPUOp(OpKernelConstruction* context) : AsyncOpKernel(context) {} void ComputeAsync(OpKernelContext* context, DoneCallback done) override { SparseFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done); } }; #define REGISTER_KERNELS(T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") 
\ .Device(DEVICE_GPU) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGPUOp<T, Tindex>) #define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64) TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX) #undef REGISTER_KERNELS_TINDEX #undef REGISTER_KERNELS #endif template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsGradOp : public OpKernel { public: explicit SparseFillEmptyRowsGradOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), errors::InvalidArgument("grad_values must be a vector, saw: ", grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<Tindex>(); const auto grad_values = grad_values_t->vec<T>(); const Tindex N = reverse_index_map_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); auto d_default_value = d_default_value_t->scalar<T>(); OP_REQUIRES_OK(context, functor::FillEmptyRowsGrad<Device, T, Tindex>()( context, reverse_index_map, grad_values, d_values, d_default_value)); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRowsGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGradOp<D##Device, T, Tindex>) 
#define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64) TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #endif #undef REGISTER_KERNELS }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class SparseFillEmptyRowsTest : public OpsTestBase { protected: void MakeOp(DataType index_type, DataType value_type) { TF_ASSERT_OK(NodeDefBuilder("sparsefillemptyrows", "SparseFillEmptyRows") .Input(FakeInput(index_type)) .Input(FakeInput(value_type)) .Input(FakeInput(index_type)) .Input(FakeInput(value_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SparseFillEmptyRowsTest, SparseFillEmptyRows) { MakeOp(DT_INT64, DT_FLOAT); AddInputFromArray<int64>(TensorShape({4, 2}), {0, 1, 0, 3, 2, 0, 3, 1}); AddInputFromArray<float>(TensorShape({4}), {0, 3, 1, 2}); AddInputFromArray<int64>(TensorShape({2}), {5, 6}); AddInputFromArray<float>(TensorShape({}), {4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected0(allocator(), DT_INT64, {6, 2}); expected0.tensor<int64, 2>()(0, 0) = 0; expected0.tensor<int64, 2>()(0, 1) = 1; expected0.tensor<int64, 2>()(1, 0) = 0; expected0.tensor<int64, 2>()(1, 1) = 3; expected0.tensor<int64, 2>()(2, 0) = 1; expected0.tensor<int64, 2>()(2, 1) = 0; expected0.tensor<int64, 2>()(3, 0) = 2; expected0.tensor<int64, 2>()(3, 1) = 0; expected0.tensor<int64, 2>()(4, 0) = 3; expected0.tensor<int64, 2>()(4, 1) = 1; expected0.tensor<int64, 2>()(5, 0) = 4; expected0.tensor<int64, 2>()(5, 1) = 0; test::ExpectTensorEqual<int64>(expected0, *GetOutput(0)); Tensor expected1(allocator(), DT_FLOAT, {6}); test::FillValues<float>(&expected1, {0, 3, 4, 1, 2, 4}); test::ExpectTensorEqual<float>(expected1, *GetOutput(1)); Tensor expected2(allocator(), DT_BOOL, {5}); test::FillValues<bool>(&expected2, {false, true, false, false, true}); test::ExpectTensorEqual<bool>(expected2, 
*GetOutput(2)); Tensor expected3(allocator(), DT_INT64, {4}); test::FillValues<int64>(&expected3, {0, 1, 3, 4}); test::ExpectTensorEqual<int64>(expected3, *GetOutput(3)); } TEST_F(SparseFillEmptyRowsTest, IndicesValuesUnmatch) { MakeOp(DT_INT64, DT_FLOAT); AddInputFromArray<int64>(TensorShape({4, 2}), {0, 1, 0, 3, 2, 0, 3, 1}); AddInputFromArray<float>(TensorShape({3}), {0, 3, 1}); AddInputFromArray<int64>(TensorShape({2}), {5, 6}); AddInputFromArray<float>(TensorShape({}), {4}); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "The length of `values` (3) must match the " "first dimension of `indices` (4).")); } TEST_F(SparseFillEmptyRowsTest, IndicesDenseShapeUnmatch) { MakeOp(DT_INT64, DT_FLOAT); AddInputFromArray<int64>(TensorShape({4, 0}), {}); AddInputFromArray<float>(TensorShape({4}), {0, 3, 1, 2}); AddInputFromArray<int64>(TensorShape({2}), {5, 6}); AddInputFromArray<float>(TensorShape({}), {4}); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "The length of `dense_shape` (2) must match " "the second dimension of `indices` (0).")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_fill_empty_rows_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9b30b58c-7418-4bdc-8919-ea8833b2161b
cpp
tensorflow/tensorflow
fake_quant_ops
tensorflow/core/kernels/fake_quant_ops.cc
tensorflow/core/kernels/fake_quant_ops_test.cc
#define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #define EIGEN_USE_GPU #endif #include "tensorflow/core/kernels/fake_quant_ops_functor.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/monitoring/gauge.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/util/determinism.h" using tensorflow::BinaryElementWiseOp; using tensorflow::DEVICE_CPU; #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) using tensorflow::DEVICE_GPU; #endif using tensorflow::OpKernel; using tensorflow::OpKernelConstruction; using tensorflow::OpKernelContext; using tensorflow::Tensor; using tensorflow::TensorShape; using tensorflow::TTypes; using tensorflow::UnaryElementWiseOp; using tensorflow::errors::InvalidArgument; namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; auto* using_fake_quant = monitoring::Gauge<bool, 0>::New( "/tensorflow/api/op/using_fake_quantization", "True if a fake_quant op is created."); #define SET_USING_FAKE_QUANT() using_fake_quant->GetCell()->Set(true) namespace { bool IsNumBitsValid(int num_bits) { return num_bits >= 2 && num_bits <= 16; } } template <typename Device> class FakeQuantWithMinMaxArgsOp : public UnaryElementWiseOp<float, FakeQuantWithMinMaxArgsOp<Device>> { public: typedef UnaryElementWiseOp<float, FakeQuantWithMinMaxArgsOp<Device>> Base; explicit FakeQuantWithMinMaxArgsOp(OpKernelConstruction* context) : Base::UnaryElementWiseOp(context) { OP_REQUIRES_OK(context, context->GetAttr("min", &min_)); OP_REQUIRES_OK(context, context->GetAttr("max", &max_)); OP_REQUIRES(context, min_ < max_, InvalidArgument("min has to be smaller than max, was: ", min_, " >= ", max_)); int num_bits; OP_REQUIRES_OK(context, 
context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 1 : 0; quant_max_ = (1 << num_bits) - 1; SET_USING_FAKE_QUANT(); } void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) { FakeQuantWithMinMaxArgsFunctor<Device> functor; functor(context->eigen_device<Device>(), input.flat<float>(), min_, max_, quant_min_, quant_max_, output->flat<float>()); } private: float min_; float max_; int quant_min_; int quant_max_; }; template <typename Device> class FakeQuantWithMinMaxArgsGradientOp : public BinaryElementWiseOp<float, FakeQuantWithMinMaxArgsGradientOp<Device>> { public: typedef BinaryElementWiseOp<float, FakeQuantWithMinMaxArgsGradientOp<Device>> Base; explicit FakeQuantWithMinMaxArgsGradientOp(OpKernelConstruction* context) : Base::BinaryElementWiseOp(context) { OP_REQUIRES_OK(context, context->GetAttr("min", &min_)); OP_REQUIRES_OK(context, context->GetAttr("max", &max_)); OP_REQUIRES(context, min_ < max_, InvalidArgument("min has to be smaller than max, was: ", min_, " >= ", max_)); int num_bits; OP_REQUIRES_OK(context, context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 
1 : 0; quant_max_ = (1 << num_bits) - 1; } template <int NDIMS> void Operate(OpKernelContext* context, const Tensor& gradient, const Tensor& input, Tensor* output) { OperateNoTemplate(context, gradient, input, output); } void OperateNoTemplate(OpKernelContext* context, const Tensor& gradient, const Tensor& input, Tensor* output) { OP_REQUIRES(context, input.IsSameSize(gradient), InvalidArgument("gradient and input must be the same size")); FakeQuantWithMinMaxArgsGradientFunctor<Device> functor; functor(context->eigen_device<Device>(), gradient.flat<float>(), input.flat<float>(), min_, max_, quant_min_, quant_max_, output->flat<float>()); } private: float min_; float max_; int quant_min_; int quant_max_; }; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxArgs").Device(DEVICE_CPU), FakeQuantWithMinMaxArgsOp<CPUDevice>); REGISTER_KERNEL_BUILDER( Name("FakeQuantWithMinMaxArgsGradient").Device(DEVICE_CPU), FakeQuantWithMinMaxArgsGradientOp<CPUDevice>); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) typedef Eigen::GpuDevice GPUDevice; template <> void FakeQuantWithMinMaxArgsFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstFlat inputs, const float min, const float max, const int quant_min, const int quant_max, typename TTypes<float>::Flat outputs); extern template struct FakeQuantWithMinMaxArgsFunctor<GPUDevice>; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxArgs").Device(DEVICE_GPU), FakeQuantWithMinMaxArgsOp<GPUDevice>); template <> void FakeQuantWithMinMaxArgsGradientFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstFlat gradients, typename TTypes<float>::ConstFlat inputs, const float min, const float max, const int quant_min, const int quant_max, typename TTypes<float>::Flat backprops); REGISTER_KERNEL_BUILDER( Name("FakeQuantWithMinMaxArgsGradient").Device(DEVICE_GPU), FakeQuantWithMinMaxArgsGradientOp<GPUDevice>); #endif template <typename Device> 
class FakeQuantWithMinMaxVarsOp : public OpKernel { public: explicit FakeQuantWithMinMaxVarsOp(OpKernelConstruction* context) : OpKernel::OpKernel(context) { int num_bits; OP_REQUIRES_OK(context, context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 1 : 0; quant_max_ = (1 << num_bits) - 1; SET_USING_FAKE_QUANT(); } void Compute(OpKernelContext* context) override { CHECK_EQ(3, context->num_inputs()); const Tensor& input = context->input(0); const Tensor& min = context->input(1); const Tensor& max = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min.shape()), InvalidArgument("`min` must be rank 0 but is rank ", min.dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max.shape()), InvalidArgument("`max` must be rank 0 but is rank ", max.dims())); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); FakeQuantWithMinMaxVarsFunctor<Device> functor; functor(context->eigen_device<Device>(), input.flat<float>(), min.scalar<float>(), max.scalar<float>(), quant_min_, quant_max_, output->flat<float>()); } private: int quant_min_; int quant_max_; }; template <typename Device> class FakeQuantWithMinMaxVarsGradientOp : public OpKernel { public: explicit FakeQuantWithMinMaxVarsGradientOp(OpKernelConstruction* context) : OpKernel::OpKernel(context) { int num_bits; OP_REQUIRES_OK(context, context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 
1 : 0; quant_max_ = (1 << num_bits) - 1; if (std::is_same<Device, Eigen::GpuDevice>::value) { OP_REQUIRES( context, !OpDeterminismRequired(), errors::Unimplemented( "Determinism is not yet supported in GPU implementation of " "FakeQuantWithMinMaxVarsGradient.")); } } void Compute(OpKernelContext* context) override { CHECK_EQ(4, context->num_inputs()); const Tensor& gradient = context->input(0); const Tensor& input = context->input(1); OP_REQUIRES(context, input.IsSameSize(gradient), InvalidArgument("gradient and input must be the same size")); const Tensor& min = context->input(2); const Tensor& max = context->input(3); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min.shape()), InvalidArgument("`min` must be rank 0 but is rank ", min.dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max.shape()), InvalidArgument("`max` must be rank 0 but is rank ", max.dims())); Tensor* grad_wrt_input; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &grad_wrt_input)); TensorShape scalar_shape; Tensor* grad_wrt_min; OP_REQUIRES_OK(context, context->allocate_output(1, scalar_shape, &grad_wrt_min)); Tensor* grad_wrt_max; OP_REQUIRES_OK(context, context->allocate_output(2, scalar_shape, &grad_wrt_max)); FakeQuantWithMinMaxVarsGradientFunctor<Device> functor; functor(context->eigen_device<Device>(), gradient.flat<float>(), input.flat<float>(), min.scalar<float>(), max.scalar<float>(), quant_min_, quant_max_, grad_wrt_input->flat<float>(), grad_wrt_min->scalar<float>(), grad_wrt_max->scalar<float>()); } private: int quant_min_; int quant_max_; }; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxVars").Device(DEVICE_CPU), FakeQuantWithMinMaxVarsOp<CPUDevice>); REGISTER_KERNEL_BUILDER( Name("FakeQuantWithMinMaxVarsGradient").Device(DEVICE_CPU), FakeQuantWithMinMaxVarsGradientOp<CPUDevice>); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) template <> void 
FakeQuantWithMinMaxVarsFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstFlat inputs, typename TTypes<float>::ConstScalar min, typename TTypes<float>::ConstScalar max, const int quant_min, const int quant_max, typename TTypes<float>::Flat output); extern template struct FakeQuantWithMinMaxVarsFunctor<GPUDevice>; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxVars") .Device(DEVICE_GPU) .HostMemory("min") .HostMemory("max"), FakeQuantWithMinMaxVarsOp<GPUDevice>); template <> void FakeQuantWithMinMaxVarsGradientFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstFlat gradients, typename TTypes<float>::ConstFlat inputs, typename TTypes<float>::ConstScalar min, typename TTypes<float>::ConstScalar max, const int quant_min, const int quant_max, typename TTypes<float>::Flat backprops_wrt_input, typename TTypes<float>::Scalar backprop_wrt_min, typename TTypes<float>::Scalar backprop_wrt_max); extern template struct FakeQuantWithMinMaxVarsGradientFunctor<GPUDevice>; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxVarsGradient") .Device(DEVICE_GPU) .HostMemory("min") .HostMemory("max"), FakeQuantWithMinMaxVarsGradientOp<GPUDevice>); #endif template <typename Device> class FakeQuantWithMinMaxVarsPerChannelOp : public OpKernel { public: explicit FakeQuantWithMinMaxVarsPerChannelOp(OpKernelConstruction* context) : OpKernel::OpKernel(context) { int num_bits; OP_REQUIRES_OK(context, context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 
1 : 0; quant_max_ = (1 << num_bits) - 1; SET_USING_FAKE_QUANT(); } void Compute(OpKernelContext* context) override { CHECK_EQ(3, context->num_inputs()); const Tensor& input = context->input(0); const int depth = input.dim_size(input.dims() - 1); const Tensor& min = context->input(1); const Tensor& max = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(min.shape()), InvalidArgument("`min` must be rank 1 but is rank ", min.dims())); OP_REQUIRES(context, min.dim_size(0) == depth, InvalidArgument("min has incorrect size, expected ", depth, " was ", min.dim_size(0))); OP_REQUIRES( context, TensorShapeUtils::IsVector(max.shape()), InvalidArgument("`max` must be rank 1 but is rank ", max.dims())); OP_REQUIRES(context, max.dim_size(0) == depth, InvalidArgument("max has incorrect size, expected ", depth, " was ", max.dim_size(0))); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); FakeQuantWithMinMaxVarsPerChannelFunctor<Device> functor; functor(context->eigen_device<Device>(), input.flat_inner_dims<float, 2>(), min.vec<float>(), max.vec<float>(), quant_min_, quant_max_, output->flat_inner_dims<float, 2>()); } private: int quant_min_; int quant_max_; }; template <typename Device> class FakeQuantWithMinMaxVarsPerChannelGradientOp : public OpKernel { public: explicit FakeQuantWithMinMaxVarsPerChannelGradientOp( OpKernelConstruction* context) : OpKernel::OpKernel(context) { int num_bits; OP_REQUIRES_OK(context, context->GetAttr("num_bits", &num_bits)); OP_REQUIRES( context, IsNumBitsValid(num_bits), InvalidArgument("num_bits must be between 2 and 16, inclusive")); bool narrow_range; OP_REQUIRES_OK(context, context->GetAttr("narrow_range", &narrow_range)); quant_min_ = narrow_range ? 
1 : 0; quant_max_ = (1 << num_bits) - 1; if (std::is_same<Device, Eigen::GpuDevice>::value) { OP_REQUIRES( context, !OpDeterminismRequired(), errors::Unimplemented( "Determinism is not yet supported in GPU implementation of " "FakeQuantWithMinMaxVarsPerChannelGradient.")); } } void Compute(OpKernelContext* context) override { CHECK_EQ(4, context->num_inputs()); const Tensor& gradient = context->input(0); const Tensor& input = context->input(1); OP_REQUIRES(context, input.IsSameSize(gradient), InvalidArgument("gradient and input must be the same size")); const int depth = input.dim_size(input.dims() - 1); const Tensor& min = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(min.shape()), InvalidArgument("`min` must be rank 1 but is rank ", min.dims())); OP_REQUIRES(context, min.dim_size(0) == depth, InvalidArgument("min has incorrect size, expected ", depth, " was ", min.dim_size(0))); const Tensor& max = context->input(3); OP_REQUIRES( context, TensorShapeUtils::IsVector(max.shape()), InvalidArgument("`max` must be rank 1 but is rank ", max.dims())); OP_REQUIRES(context, max.dim_size(0) == depth, InvalidArgument("max has incorrect size, expected ", depth, " was ", max.dim_size(0))); Tensor* grad_wrt_input; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &grad_wrt_input)); TensorShape min_max_shape({input.dim_size(input.dims() - 1)}); Tensor* grad_wrt_min; OP_REQUIRES_OK(context, context->allocate_output(1, min_max_shape, &grad_wrt_min)); Tensor* grad_wrt_max; OP_REQUIRES_OK(context, context->allocate_output(2, min_max_shape, &grad_wrt_max)); FakeQuantWithMinMaxVarsPerChannelGradientFunctor<Device> functor; functor( context->eigen_device<Device>(), gradient.flat_inner_dims<float, 2>(), input.flat_inner_dims<float, 2>(), min.vec<float>(), max.vec<float>(), quant_min_, quant_max_, grad_wrt_input->flat_inner_dims<float, 2>(), grad_wrt_min->vec<float>(), grad_wrt_max->vec<float>()); } private: int quant_min_; int quant_max_; }; 
REGISTER_KERNEL_BUILDER( Name("FakeQuantWithMinMaxVarsPerChannel").Device(DEVICE_CPU), FakeQuantWithMinMaxVarsPerChannelOp<CPUDevice>); REGISTER_KERNEL_BUILDER( Name("FakeQuantWithMinMaxVarsPerChannelGradient").Device(DEVICE_CPU), FakeQuantWithMinMaxVarsPerChannelGradientOp<CPUDevice>); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) template <> void FakeQuantWithMinMaxVarsPerChannelFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstMatrix inputs, typename TTypes<float>::ConstFlat min, typename TTypes<float>::ConstFlat max, const int quant_min, const int quant_max, typename TTypes<float>::Matrix outputs); extern template struct FakeQuantWithMinMaxVarsPerChannelFunctor<GPUDevice>; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxVarsPerChannel") .Device(DEVICE_GPU) .HostMemory("min") .HostMemory("max"), FakeQuantWithMinMaxVarsPerChannelOp<GPUDevice>); template <> void FakeQuantWithMinMaxVarsPerChannelGradientFunctor<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<float>::ConstMatrix gradients, typename TTypes<float>::ConstMatrix inputs, typename TTypes<float>::ConstVec min, typename TTypes<float>::ConstVec max, const int quant_min, const int quant_max, typename TTypes<float>::Matrix backprops_wrt_input, typename TTypes<float>::Vec backprop_wrt_min, typename TTypes<float>::Vec backprop_wrt_max); extern template struct FakeQuantWithMinMaxVarsPerChannelGradientFunctor< GPUDevice>; REGISTER_KERNEL_BUILDER(Name("FakeQuantWithMinMaxVarsPerChannelGradient") .Device(DEVICE_GPU) .HostMemory("min") .HostMemory("max"), FakeQuantWithMinMaxVarsPerChannelGradientOp<GPUDevice>); #endif }
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"

namespace tensorflow {

using tensorflow::AllocatorAttributes;
using tensorflow::DT_FLOAT;
using tensorflow::NodeDefBuilder;
using tensorflow::OpsTestBase;
using tensorflow::Tensor;
using tensorflow::TensorShape;
using tensorflow::test::ExpectClose;
using tensorflow::test::FillValues;

// Test fixture for the FakeQuantWithMinMax* kernels. Provides helpers that
// build, initialize and run one of the fake-quant ops against concrete
// input/expected-output tables supplied by each TEST_F below.
class QuantOpsTest : public OpsTestBase {
 protected:
  // Appends a DT_FLOAT input tensor of the given shape filled with random
  // values. Used by the gradient tests, which only propagate (not inspect)
  // the incoming gradient values.
  void AddRandomInput(const TensorShape& shape) {
    CHECK_GT(input_types_.size(), inputs_.size())
        << "Adding more inputs than types; perhaps you need to call MakeOp";
    // Ownership of the raw Tensor is transferred to tensors_, which the
    // OpsTestBase fixture cleans up.
    Tensor* input = new Tensor(device_->GetAllocator(AllocatorAttributes()),
                               DT_FLOAT, shape);
    input->flat<float>().setRandom();
    tensors_.push_back(input);
    bool is_ref = IsRefType(input_types_[inputs_.size()]);
    if (is_ref) {
      CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]), DT_FLOAT);
      inputs_.push_back({&lock_for_refs_, input});
    } else {
      CHECK_EQ(input_types_[inputs_.size()], DT_FLOAT);
      inputs_.push_back({nullptr, input});
    }
  }

  // Runs FakeQuantWithMinMaxArgs (min/max given as attrs) on `data` of the
  // given `shape` and checks the output against `expected_data` with the
  // given tolerances (negative tolerance means "use ExpectClose defaults").
  void RunTestFakeQuantWithMinMaxArgs(const int num_bits,
                                      const bool narrow_range, const float min,
                                      const float max, const TensorShape& shape,
                                      const absl::Span<const float> data,
                                      absl::Span<const float> expected_data,
                                      const double atol = -1.0,
                                      const double rtol = -1.0,
                                      const DeviceType device = DEVICE_CPU) {
    if (device == DEVICE_GPU) {
      SetDevice(device,
                std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice(
                    "GPU", {}, "/job:a/replica:0/task:0")));
    }
    TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxArgs")
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("min", min)
                     .Attr("max", max)
                     .Attr("num_bits", num_bits)
                     .Attr("narrow_range", narrow_range)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
    AddInputFromArray<float>(shape, data);
    TF_ASSERT_OK(RunOpKernel());
    Tensor* output = GetOutput(0);
    // Make sure any device work is finished before comparing.
    TF_EXPECT_OK(device_->Sync());
    Tensor expected(allocator(), DT_FLOAT, shape);
    FillValues<float>(&expected, expected_data);
    ExpectClose(expected, *output, atol, rtol);
  }

  // Runs FakeQuantWithMinMaxVars (min/max given as scalar tensor inputs)
  // and checks the output against `expected_data`.
  void RunTestFakeQuantWithMinMaxVars(const int num_bits,
                                      const bool narrow_range, const float min,
                                      const float max, const TensorShape& shape,
                                      const absl::Span<const float> data,
                                      absl::Span<const float> expected_data,
                                      const double atol = -1.0,
                                      const double rtol = -1.0,
                                      const DeviceType device = DEVICE_CPU) {
    if (device == DEVICE_GPU) {
      SetDevice(device,
                std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice(
                    "GPU", {}, "/job:a/replica:0/task:0")));
    }
    TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVars")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("num_bits", num_bits)
                     .Attr("narrow_range", narrow_range)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
    AddInputFromArray<float>(shape, data);
    AddInputFromArray<float>(TensorShape({}), {min});
    AddInputFromArray<float>(TensorShape({}), {max});
    TF_ASSERT_OK(RunOpKernel());
    Tensor* output = GetOutput(0);
    // Fix: build the expected tensor from `shape` instead of a hard-coded
    // TensorShape({2, 3}). All current callers pass {2, 3}, so behavior is
    // unchanged for them, but the helper now matches its siblings and works
    // for any shape.
    Tensor expected(allocator(), DT_FLOAT, shape);
    FillValues<float>(&expected, expected_data);
    ExpectClose(expected, *output, atol, rtol);
  }

  // Runs FakeQuantWithMinMaxVarsPerChannel (per-channel min/max vectors of
  // shape `minmax_shape`, matching the last dimension of `shape`) and checks
  // the output against `expected_data`.
  void RunTestFakeQuantWithMinMaxVarsPerChannel(
      const int num_bits, const bool narrow_range,
      const TensorShape& minmax_shape, const absl::Span<const float> min,
      const absl::Span<const float> max, const TensorShape& shape,
      const absl::Span<const float> data,
      absl::Span<const float> expected_data, const double atol = -1.0,
      const double rtol = -1.0, const DeviceType device = DEVICE_CPU) {
    if (device == DEVICE_GPU) {
      SetDevice(device,
                std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice(
                    "GPU", {}, "/job:a/replica:0/task:0")));
    }
    TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannel")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("num_bits", num_bits)
                     .Attr("narrow_range", narrow_range)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
    AddInputFromArray<float>(shape, data);
    AddInputFromArray<float>(minmax_shape, min);
    AddInputFromArray<float>(minmax_shape, max);
    TF_ASSERT_OK(RunOpKernel());
    Tensor* output = GetOutput(0);
    Tensor expected(allocator(), DT_FLOAT, shape);
    FillValues<float>(&expected, expected_data);
    ExpectClose(expected, *output, atol, rtol);
  }
};

// ---- FakeQuantWithMinMaxArgs: symmetric range, zero input ----

TEST_F(QuantOpsTest, WithArgsSymmetricRangeZeroInput_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(8, false, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0,
                                 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest, WithArgsSymmetricRangeZeroInput_RegularRange_Gpu) {
  RunTestFakeQuantWithMinMaxArgs(8, false, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0,
                                 DEVICE_GPU);
}
#endif

TEST_F(QuantOpsTest, WithArgsSymmetricRangeZeroInput_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(8, true, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0,
                                 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest, WithArgsSymmetricRangeZeroInput_NarrowRange_Gpu) {
  RunTestFakeQuantWithMinMaxArgs(8, true, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0,
                                 DEVICE_GPU);
}
#endif

// ---- FakeQuantWithMinMaxArgs: 8-bit nudging behavior ----

TEST_F(QuantOpsTest, WithArgsNoNudging_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(
      8, false, -10.0f, 53.75f, TensorShape({2, 3}),
      {-10.1f, -10.0f, -9.9f, -9.75f, 53.75f, 53.8f},
      {-10.0f, -10.0f, -10.0f, -9.75f, 53.75f, 53.75f});
}

TEST_F(QuantOpsTest, WithArgsNoNudging_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(
      8, true, -10.0f, 53.5f, TensorShape({2, 3}),
      {-10.1f, -10.0f, -9.9f, -9.75f, 53.5f, 53.6f},
      {-10.0f, -10.0f, -10.0f, -9.75f, 53.5f, 53.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(8, false, -0.1f, 63.65f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.25f, 63.75f, 63.8f},
                                 {0.0f, 0.0f, 0.0f, 0.25f, 63.75f, 63.75f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(8, true, -0.1f, 63.4f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.25f, 63.5f, 63.6f},
                                 {0.0f, 0.0f, 0.0f, 0.25f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(8, false, -0.1275f, 63.6225f,
                                 TensorShape({2, 3}),
                                 {-0.26f, -0.25f, -0.24f, 0.0f, 63.5f, 63.6f},
                                 {-0.25f, -0.25f, -0.25f, 0.0f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(
      8, true, -0.1275f, 63.3725f, TensorShape({2, 3}),
      {-0.26f, -0.25f, -0.24f, 0.0f, 63.25f, 63.3f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 63.25f, 63.25f});
}

TEST_F(QuantOpsTest, WithArgsNudgedZeroIs255_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(
      8, false, -63.65f, 0.1f, TensorShape({2, 3}),
      {-63.8f, -63.75f, -63.7f, -63.5f, 0.0f, 0.1f},
      {-63.75f, -63.75f, -63.75f, -63.5f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedZeroIs255_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(8, true, -63.4f, 0.1f, TensorShape({2, 3}),
                                 {-63.6f, -63.5f, -63.4f, -63.25f, 0.0f, 0.1f},
                                 {-63.5f, -63.5f, -63.5f, -63.25f, 0.0f, 0.0f});
}

// ---- FakeQuantWithMinMaxArgs: 4-bit quantization ----

TEST_F(QuantOpsTest, WithArgsNoNudging_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(4, false, -6.0f, 1.5f, TensorShape({2, 3}),
                                 {-6.1f, -6.0f, -5.9f, -5.5f, 1.5f, 1.6f},
                                 {-6.0f, -6.0f, -6.0f, -5.5f, 1.5f, 1.5f});
}

TEST_F(QuantOpsTest, WithArgsNoNudging_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(4, true, -6.0f, 1.0f, TensorShape({2, 3}),
                                 {-6.1f, -6.0f, -5.9f, -5.5f, 1.0f, 1.1f},
                                 {-6.0f, -6.0f, -6.0f, -5.5f, 1.0f, 1.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(4, false, -0.1f, 7.4f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.5f, 7.5f, 7.6f},
                                 {0.0f, 0.0f, 0.0f, 0.5f, 7.5f, 7.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(4, true, -0.1f, 6.9f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.5f, 7.0f, 7.1f},
                                 {0.0f, 0.0f, 0.0f, 0.5f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(4, false, -0.4f, 7.1f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 7.0f, 7.1f},
                                 {-0.5f, -0.5f, -0.00f, 0.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(4, true, -0.4f, 6.6f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 6.5f, 6.6f},
                                 {-0.5f, -0.5f, 0.0f, 0.0f, 6.5f, 6.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedZeroIs15_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(4, false, -7.3f, 0.2f, TensorShape({2, 3}),
                                 {-7.6f, -7.5f, -7.4f, -7.2f, 0.0f, 0.1f},
                                 {-7.5f, -7.5f, -7.5f, -7.0f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedZeroIs15_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(4, true, -6.8f, 0.2f, TensorShape({2, 3}),
                                 {-7.1f, -7.0f, -6.9f, -6.7f, 0.0f, 0.1f},
                                 {-7.0f, -7.0f, -7.0f, -6.5f, 0.0f, 0.0f});
}

// ---- FakeQuantWithMinMaxArgs: 2-bit quantization ----

TEST_F(QuantOpsTest, WithArgsNoNudging_2Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(2, false, -1.0f, 0.5f, TensorShape({2, 3}),
                                 {-1.1f, -1.0f, -0.9f, -0.3f, 0.1f, 0.6f},
                                 {-1.0f, -1.0f, -1.0f, -0.5f, 0.0f, 0.5f});
}

TEST_F(QuantOpsTest, WithArgsNoNudging_2Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(2, true, -1.0f, 0.0f, TensorShape({2, 3}),
                                 {-1.1f, -1.0f, -0.9f, -0.3f, 0.0f, 0.1f},
                                 {-1.0f, -1.0f, -1.0f, -0.5f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_2Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(2, false, -0.1f, 1.4f, TensorShape({2, 3}),
                                 {-0.2f, 0.1f, 0.7f, 1.0f, 1.3f, 1.6f},
                                 {0.0f, 0.0f, 0.5f, 1.0f, 1.5f, 1.5f});
}

TEST_F(QuantOpsTest, WithArgsNudgedDown_2Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(2, true, -0.1f, 0.9f, TensorShape({2, 3}),
                                 {-0.1f, 0.1f, 0.7f, 0.9f, 1.0f, 1.1f},
                                 {-0.0f, 0.0f, 0.5f, 1.0f, 1.0f, 1.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_2Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxArgs(2, false, -0.4f, 1.1f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 1.0f, 1.1f},
                                 {-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 1.0f});
}

TEST_F(QuantOpsTest, WithArgsNudgedUp_2Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxArgs(2, true, -0.4f, 0.6f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 0.5f, 0.6f},
                                 {-0.5f, -0.5f, -0.00f, 0.0f, 0.5f, 0.5f});
}

// ---- FakeQuantWithMinMaxArgsGradient ----
// The gradient passes through for inputs inside the (nudged) range and is
// zero outside; the expected tensors below encode exactly that mask applied
// to the random incoming gradient.

TEST_F(QuantOpsTest, WithArgsGradient_RegularRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxArgsGradient")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("min", -0.125f)
                   .Attr("max", 63.625f)
                   .Attr("narrow_range", false)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.26f, -0.25f, -0.24f, 0.0f, 63.5f, 63.6f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output = GetOutput(0);
  auto input_flat = GetInput(0).flat<float>();
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
  FillValues<float>(&expected, {0.0f, input_flat(1), input_flat(2),
                                input_flat(3), input_flat(4), 0.0f});
  ExpectClose(expected, *output);
}

TEST_F(QuantOpsTest, WithArgsGradient_NarrowRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxArgsGradient")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("min", -0.125f)
                   .Attr("max", 63.375f)
                   .Attr("narrow_range", true)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.26f, -0.25f, -0.24f, 0.0f, 63.25f, 63.3f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output = GetOutput(0);
  auto input_flat = GetInput(0).flat<float>();
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
  FillValues<float>(&expected, {0.0f, input_flat(1), input_flat(2),
                                input_flat(3), input_flat(4), 0.0f});
  ExpectClose(expected, *output);
}

TEST_F(QuantOpsTest, WithArgsGradient_4Bits_RegularRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxArgsGradient")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("min", -0.4f)
                   .Attr("max", 7.1f)
                   .Attr("num_bits", 4)
                   .Attr("narrow_range", false)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.6f, -0.5f, -0.4f, 0.0f, 7.0f, 7.1f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output = GetOutput(0);
  auto input_flat = GetInput(0).flat<float>();
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
  FillValues<float>(&expected, {0.0f, input_flat(1), input_flat(2),
                                input_flat(3), input_flat(4), 0.0f});
  ExpectClose(expected, *output);
}

TEST_F(QuantOpsTest, WithArgsGradient_4Bits_NarrowRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxArgsGradient")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("min", -0.4f)
                   .Attr("max", 6.6f)
                   .Attr("num_bits", 4)
                   .Attr("narrow_range", true)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.6f, -0.5f, -0.4f, 0.0f, 6.5f, 6.6f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output = GetOutput(0);
  auto input_flat = GetInput(0).flat<float>();
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
  FillValues<float>(&expected, {0.0f, input_flat(1), input_flat(2),
                                input_flat(3), input_flat(4), 0.0f});
  ExpectClose(expected, *output);
}

// ---- FakeQuantWithMinMaxVars (min/max as tensors) ----

TEST_F(QuantOpsTest, WithVars_ZeroMinAndMax) {
  RunTestFakeQuantWithMinMaxVars(8, false, 0.0f, 0.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithVarsSymmetricRangeZeroInput_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(8, false, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0,
                                 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest, WithVarsSymmetricRangeZeroInput_RegularRange_Gpu) {
  RunTestFakeQuantWithMinMaxVars(8, false, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0,
                                 DEVICE_GPU);
}
#endif

TEST_F(QuantOpsTest, WithVarsSymmetricRangeZeroInput_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(8, true, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0,
                                 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest, WithVarsSymmetricRangeZeroInput_NarrowRange_Gpu) {
  RunTestFakeQuantWithMinMaxVars(8, true, -10.0f, 10.0f, TensorShape({2, 3}),
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
                                 {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0,
                                 DEVICE_GPU);
}
#endif

TEST_F(QuantOpsTest, WithVarsNoNudging_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(
      8, false, -10.0f, 53.75f, TensorShape({2, 3}),
      {-10.1f, -10.0f, -9.9f, -9.75f, 53.75f, 53.8f},
      {-10.0f, -10.0f, -10.0f, -9.75f, 53.75f, 53.75f});
}

TEST_F(QuantOpsTest, WithVarsNoNudging_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(
      8, true, -10.0f, 53.5f, TensorShape({2, 3}),
      {-10.1f, -10.0f, -9.90f, -9.75f, 53.5f, 53.6f},
      {-10.0f, -10.0f, -10.0f, -9.75f, 53.5f, 53.5f});
}

TEST_F(QuantOpsTest, WithVarsNudgedDown_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(8, false, -0.1f, 63.65f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.25f, 63.75f, 63.8f},
                                 {-0.0f, 0.0f, 0.0f, 0.25f, 63.75f, 63.75f});
}

TEST_F(QuantOpsTest, WithVarsNudgedDown_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(8, true, -0.1f, 63.4f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.25f, 63.5f, 63.6f},
                                 {-0.0f, 0.0f, 0.0f, 0.25f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsNudgedUp_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(8, false, -0.125f, 63.625f,
                                 TensorShape({2, 3}),
                                 {-0.26f, -0.25f, -0.24f, 0.0f, 63.5f, 63.6f},
                                 {-0.25f, -0.25f, -0.25f, 0.0f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsNudgedUp_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(
      8, true, -0.125f, 63.375f, TensorShape({2, 3}),
      {-0.26f, -0.25f, -0.24f, 0.0f, 63.25f, 63.3f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 63.25f, 63.25f});
}
// ---- FakeQuantWithMinMaxVars: more 8-bit and 4-bit nudging cases ----

TEST_F(QuantOpsTest, WithVarsNudgedZeroIs255_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(
      8, false, -63.65f, 0.1f, TensorShape({2, 3}),
      {-63.80f, -63.75f, -63.70f, -63.5f, 0.0f, 0.1f},
      {-63.75f, -63.75f, -63.75f, -63.5f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithVarsNudgedZeroIs255_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(8, true, -63.4f, 0.1f, TensorShape({2, 3}),
                                 {-63.6f, -63.5f, -63.4f, -63.25f, 0.0f, 0.1f},
                                 {-63.5f, -63.5f, -63.5f, -63.25f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithVarsNoNudging_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(4, false, -6.0f, 1.5f, TensorShape({2, 3}),
                                 {-6.1f, -6.0f, -5.9f, -5.5f, 1.5f, 1.6f},
                                 {-6.0f, -6.0f, -6.0f, -5.5f, 1.5f, 1.5f});
}

TEST_F(QuantOpsTest, WithVarsNoNudging_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(4, true, -6.0f, 1.0f, TensorShape({2, 3}),
                                 {-6.1f, -6.0f, -5.9f, -5.5f, 1.0f, 1.1f},
                                 {-6.0f, -6.0f, -6.0f, -5.5f, 1.0f, 1.0f});
}

TEST_F(QuantOpsTest, WithVarsNudgedDown_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(4, false, -0.1f, 7.4f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.5f, 7.5f, 7.6f},
                                 {-0.0f, 0.0f, 0.0f, 0.5f, 7.5f, 7.5f});
}

TEST_F(QuantOpsTest, WithVarsNudgedDown_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(4, true, -0.1f, 6.9f, TensorShape({2, 3}),
                                 {-0.1f, 0.0f, 0.1f, 0.5f, 7.0f, 7.1f},
                                 {-0.0f, 0.0f, 0.0f, 0.5f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsNudgedUp_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(4, false, -0.4f, 7.1f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 7.0f, 7.1f},
                                 {-0.5f, -0.5f, -0.00f, 0.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsNudgedUp_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(4, true, -0.4f, 6.6f, TensorShape({2, 3}),
                                 {-0.6f, -0.5f, -0.24f, 0.0f, 6.5f, 6.6f},
                                 {-0.5f, -0.5f, -0.00f, 0.0f, 6.5f, 6.5f});
}

TEST_F(QuantOpsTest, WithVarsNudgedZero15_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVars(4, false, -7.3f, 0.2f, TensorShape({2, 3}),
                                 {-7.6f, -7.5f, -7.4f, -7.2f, 0.0f, 0.1f},
                                 {-7.5f, -7.5f, -7.5f, -7.0f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithVarsNudgedZero15_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVars(4, true, -6.8f, 0.2f, TensorShape({2, 3}),
                                 {-7.1f, -7.0f, -6.9f, -6.5f, 0.0f, 0.1f},
                                 {-7.0f, -7.0f, -7.0f, -6.5f, 0.0f, 0.0f});
}

// ---- FakeQuantWithMinMaxVarsGradient ----
// Outputs: 0 = backprop w.r.t. input (incoming gradient masked to the
// quantization range), 1 = backprop w.r.t. min, 2 = backprop w.r.t. max.

TEST_F(QuantOpsTest, WithVarsGradient_ZeroMinAndMax) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsGradient")
                   .Attr("narrow_range", false)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  // Input 0 is a random incoming gradient; with min == max == 0 the whole
  // gradient passes through and min/max gradients are zero.
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3}));
  auto in_flat = GetInput(0).flat<float>();
  FillValues<float>(
      &expected_bprop_wrt_input,
      {in_flat(0), in_flat(1), in_flat(2), in_flat(3), in_flat(4), in_flat(5)});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_min.flat<float>()(0) = 0.0f;
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_max.flat<float>()(0) = 0.0f;
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

TEST_F(QuantOpsTest, WithVarsGradient_RegularRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsGradient")
                   .Attr("narrow_range", false)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  // Element 0 is below the range, element 5 above; both get clamped out of
  // the input gradient and routed to the min/max gradients instead.
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.26f, -0.25f, -0.24f, 0.0f, 63.5f, 63.6f});
  AddInputFromArray<float>(TensorShape({}), {-0.125f});
  AddInputFromArray<float>(TensorShape({}), {63.625f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3}));
  auto in_flat = GetInput(0).flat<float>();
  FillValues<float>(&expected_bprop_wrt_input,
                    {0.0f, in_flat(1), in_flat(2), in_flat(3), in_flat(4),
                     0.0f});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_min.flat<float>()(0) = in_flat(0);
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_max.flat<float>()(0) = in_flat(5);
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

TEST_F(QuantOpsTest, WithVarsGradient_NarrowRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsGradient")
                   .Attr("narrow_range", true)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.26f, -0.25f, -0.24f, 0.0f, 63.25f, 63.3f});
  AddInputFromArray<float>(TensorShape({}), {-0.125f});
  AddInputFromArray<float>(TensorShape({}), {63.375f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3}));
  auto in_flat = GetInput(0).flat<float>();
  FillValues<float>(&expected_bprop_wrt_input,
                    {0.0f, in_flat(1), in_flat(2), in_flat(3), in_flat(4),
                     0.0f});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_min.flat<float>()(0) = in_flat(0);
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_max.flat<float>()(0) = in_flat(5);
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

TEST_F(QuantOpsTest, WithVarsGradient_4Bits_RegularRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsGradient")
                   .Attr("num_bits", 4)
                   .Attr("narrow_range", false)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.6f, -0.5f, -0.4f, 0.0f, 7.0f, 7.1f});
  AddInputFromArray<float>(TensorShape({}), {-0.4f});
  AddInputFromArray<float>(TensorShape({}), {7.1f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3}));
  auto in_flat = GetInput(0).flat<float>();
  FillValues<float>(&expected_bprop_wrt_input,
                    {0.0f, in_flat(1), in_flat(2), in_flat(3), in_flat(4),
                     0.0f});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_min.flat<float>()(0) = in_flat(0);
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_max.flat<float>()(0) = in_flat(5);
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

TEST_F(QuantOpsTest, WithVarsGradient_4Bits_NarrowRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsGradient")
                   .Attr("num_bits", 4)
                   .Attr("narrow_range", true)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({2, 3}));
  AddInputFromArray<float>(TensorShape({2, 3}),
                           {-0.6f, -0.5f, -0.4f, 0.0f, 6.5f, 6.6f});
  AddInputFromArray<float>(TensorShape({}), {-0.4f});
  AddInputFromArray<float>(TensorShape({}), {6.6f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3}));
  auto in_flat = GetInput(0).flat<float>();
  FillValues<float>(&expected_bprop_wrt_input,
                    {0.0f, in_flat(1), in_flat(2), in_flat(3), in_flat(4),
                     0.0f});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_min.flat<float>()(0) = in_flat(0);
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({}));
  expected_bprop_wrt_max.flat<float>()(0) = in_flat(5);
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

// ---- FakeQuantWithMinMaxVarsPerChannel ----
// min/max are rank-1 tensors; the channel dimension is the last dimension
// of the input shape.

TEST_F(QuantOpsTest, WithVarsPerChannel_ZeroMinAndMax) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f}, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelSymmetricRangeZeroInput_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-10.0f, -10.0f, -10.0f, -10.0f},
      {10.0f, 10.0f, 10.0f, 10.0f}, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest,
       WithVarsPerChannelSymmetricRangeZeroInput_RegularRange_Gpu) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-10.0f, -10.0f, -10.0f, -10.0f},
      {10.0f, 10.0f, 10.0f, 10.0f}, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0, DEVICE_GPU);
}
#endif

TEST_F(QuantOpsTest, WithVarsPerChannelSymmetricRangeZeroInput_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-10.0f, -10.0f, -10.0f, -10.0f},
      {10.0f, 10.0f, 10.0f, 10.0f}, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0);
}

#if GOOGLE_CUDA
TEST_F(QuantOpsTest,
       WithVarsPerChannelSymmetricRangeZeroInput_NarrowRange_Gpu) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-10.0f, -10.0f, -10.0f, -10.0f},
      {10.0f, 10.0f, 10.0f, 10.0f}, TensorShape({4}), {0.0f, 0.0f, 0.0f, 0.0f},
      {0.0f, 0.0f, 0.0f, 0.0f}, 0.0, 0.0, DEVICE_GPU);
}
#endif

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedDown_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {63.65f, 63.65f, 63.65f, 63.65f}, TensorShape({4}),
      {-0.1f, 0.0f, 63.75f, 63.8f}, {0.0f, 0.0f, 63.75f, 63.75f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedDown_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {63.4f, 63.4f, 63.4f, 63.4f}, TensorShape({4}),
      {-0.1f, 0.0f, 63.5f, 63.6f}, {0.0f, 0.0f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedUp_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f},
      {63.625f, 63.625f, 63.625f, 63.625f}, TensorShape({4}),
      {-0.26f, -0.25f, -0.24f, 63.6f}, {-0.25f, -0.25f, -0.25f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedUp_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f},
      {63.375f, 63.375f, 63.375f, 63.375f}, TensorShape({4}),
      {-0.26f, -0.25f, -0.24f, 63.3f}, {-0.25f, -0.25f, -0.25f, 63.25f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedDown_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({3}), {-0.1f, -0.1f, -0.1f},
      {63.65f, 63.65f, 63.65f}, TensorShape({2, 3}),
      {-0.1f, 0.0f, 0.1f, 0.25f, 63.75f, 63.80f},
      {-0.0f, 0.0f, 0.0f, 0.25f, 63.75f, 63.75f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedDown_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({3}), {-0.1f, -0.1f, -0.1f}, {63.4f, 63.4f, 63.4f},
      TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.25f, 63.5f, 63.6f},
      {0.0f, 0.0f, 0.0f, 0.25f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedUp_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({3}), {-0.125f, -0.125f, -0.125f},
      {63.625f, 63.625f, 63.625f}, TensorShape({2, 3}),
      {-0.26f, -0.25f, -0.24f, 0.0f, 63.5f, 63.6f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedUp_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({3}), {-0.125f, -0.125f, -0.125f},
      {63.375f, 63.375f, 63.375f}, TensorShape({2, 3}),
      {-0.26f, -0.25f, -0.24f, 0.0f, 63.25f, 63.3f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 63.25f, 63.25f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedDown_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {63.65f, 63.65f, 63.65f, 63.65f}, TensorShape({1, 2, 3, 4}),
      {-0.1f, 0.0f, 0.1f, 0.25f, 0.5f, 0.75f,
       1.0f, 1.25f, 1.5f, 1.75f, 2.0f, 2.25f,
       63.0f, 63.25f, 63.5f, 63.7f, 63.75f, 63.8f,
       63.9f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {0.0f, 0.0f, 0.0f, 0.25f, 0.5f, 0.75f,
       1.0f, 1.25f, 1.5f, 1.75f, 2.0f, 2.25f,
       63.0f, 63.25f, 63.5f, 63.75f, 63.75f, 63.75f,
       63.75f, 63.75f, 63.75f, 63.75f, 63.75f, 63.75f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedDown_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {63.4f, 63.4f, 63.4f, 63.4f}, TensorShape({1, 2, 3, 4}),
      {-0.1f, 0.0f, 0.1f, 0.25f, 0.5f, 0.75f,
       1.0f, 1.25f, 1.5f, 1.75f, 2.0f, 2.25f,
       63.0f, 63.25f, 63.3f, 63.4f, 63.5f, 63.6f,
       63.7f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {0.0f, 0.0f, 0.0f, 0.25f, 0.5f, 0.75f,
       1.0f, 1.25f, 1.5f, 1.75f, 2.0f, 2.25f,
       63.0f, 63.25f, 63.25f, 63.5f, 63.5f, 63.5f,
       63.5f, 63.5f, 63.5f, 63.5f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedUp_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, false, TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f},
      {63.625f, 63.625f, 63.625f, 63.625f}, TensorShape({1, 2, 3, 4}),
      {-0.3f, -0.25f, -0.2f, 0.0f, 0.25f, 0.5f,
       0.75f, 1.0f, 1.25f, 1.5f, 1.75f, 2.0f,
       63.0f, 63.25f, 63.4f, 63.5f, 63.6f, 63.7f,
       100.0f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 0.25f, 0.5f,
       0.75f, 1.0f, 1.25f, 1.5f, 1.75f, 2.0f,
       63.0f, 63.25f, 63.5f, 63.5f, 63.5f, 63.5f,
       63.5f, 63.5f, 63.5f, 63.5f, 63.5f, 63.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedUp_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      8, true, TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f},
      {63.375f, 63.375f, 63.375f, 63.375f}, TensorShape({1, 2, 3, 4}),
      {-0.3f, -0.25f, -0.2f, 0.0f, 0.25f, 0.5f,
       0.75f, 1.0f, 1.25f, 1.5f, 1.75f, 2.0f,
       63.0f, 63.2f, 63.25f, 63.3f, 63.4f, 63.5f,
       100.0f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {-0.25f, -0.25f, -0.25f, 0.0f, 0.25f, 0.5f,
       0.75f, 1.0f, 1.25f, 1.5f, 1.75f, 2.0f,
       63.0f, 63.25f, 63.25f, 63.25f, 63.25f, 63.25f,
       63.25f, 63.25f, 63.25f, 63.25f, 63.25f, 63.25f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedDown_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {7.4f, 7.4f, 7.4f, 7.4f}, TensorShape({4}), {-0.1f, 0.0f, 7.5f, 7.6f},
      {0.0f, 0.0f, 7.5f, 7.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedDown_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {6.9f, 6.9f, 6.9f, 6.9f}, TensorShape({4}), {-0.1f, 0.0f, 7.0f, 7.1f},
      {0.0f, 0.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedUp_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f},
      {7.1f, 7.1f, 7.1f, 7.1f}, TensorShape({4}), {-0.6f, -0.5f, 7.0f, 7.1f},
      {-0.5f, -0.5f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1NudgedUp_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f},
      {6.6f, 6.6f, 6.6f, 6.6f}, TensorShape({4}), {-0.6f, -0.5f, 6.5f, 6.6f},
      {-0.5f, -0.5f, 6.5f, 6.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedDown_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({3}), {-0.1f, -0.1f, -0.1f}, {7.4f, 7.4f, 7.4f},
      TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.5f, 7.5f, 7.6f},
      {0.0f, 0.0f, 0.0f, 0.5f, 7.5f, 7.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedDown_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({3}), {-0.1f, -0.1f, -0.1f}, {6.9f, 6.9f, 6.9f},
      TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.5f, 7.0f, 7.1f},
      {0.0f, 0.0f, 0.0f, 0.5f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedUp_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({3}), {-0.4f, -0.4f, -0.4f}, {7.1f, 7.1f, 7.1f},
      TensorShape({2, 3}), {-0.51f, -0.5f, -0.24f, 0.0f, 7.0f, 7.1f},
      {-0.5f, -0.5f, 0.0f, 0.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim2NudgedUp_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({3}), {-0.4f, -0.4f, -0.4f}, {6.6f, 6.6f, 6.6f},
      TensorShape({2, 3}), {-0.6f, -0.5f, -0.24f, 0.0f, 6.5f, 6.6f},
      {-0.5f, -0.5f, 0.0f, 0.0f, 6.5f, 6.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedDown_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {7.4f, 7.4f, 7.4f, 7.4f}, TensorShape({1, 2, 3, 4}),
      {-0.1f, 0.0f, 0.1f, 0.5f, 1.0f, 1.5f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 7.0f, 7.4f, 7.5f, 7.7f,
       7.8f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {0.0f, 0.0f, 0.0f, 0.5f, 1.0f, 1.5f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 7.0f, 7.5f, 7.5f, 7.5f,
       7.5f, 7.5f, 7.5f, 7.5f, 7.5f, 7.5f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedDown_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f},
      {6.9f, 6.9f, 6.9f, 6.9f}, TensorShape({1, 2, 3, 4}),
      {-0.1f, 0.0f, 0.1f, 0.5f, 1.0f, 1.5f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 6.8f, 6.9f, 7.0f, 7.1f,
       7.2f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {0.0f, 0.0f, 0.0f, 0.5f, 1.0f, 1.5f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 7.0f, 7.0f, 7.0f, 7.0f,
       7.0f, 7.0f, 7.0f, 7.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedUp_4Bits_RegularRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, false, TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f},
      {7.1f, 7.1f, 7.1f, 7.1f}, TensorShape({1, 2, 3, 4}),
      {-0.6f, -0.5f, -0.4f, 0.0f, 0.5f, 1.0f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 6.9f, 7.0f, 7.1f, 7.7f,
       100.0f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {-0.5f, -0.5f, -0.5f, 0.0f, 0.5f, 1.0f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       6.0f, 6.5f, 7.0f, 7.0f, 7.0f, 7.0f,
       7.0f, 7.0f, 7.0f, 7.0f, 7.0f, 7.0f});
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim4NudgedUp_4Bits_NarrowRange) {
  RunTestFakeQuantWithMinMaxVarsPerChannel(
      4, true, TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f},
      {6.6f, 6.6f, 6.6f, 6.6f}, TensorShape({1, 2, 3, 4}),
      {-0.6f, -0.5f, -0.4f, 0.0f, 0.5f, 1.0f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       5.5f, 6.0f, 6.4f, 6.5f, 6.6f, 6.7f,
       100.0f, 100.0f, 100.0f, 100.0f, 100.0f, 1000.0f},
      {-0.5f, -0.5f, -0.5f, 0.0f, 0.5f, 1.0f,
       1.5f, 2.0f, 2.5f, 3.0f, 3.5f, 4.0f,
       5.5f, 6.0f, 6.5f, 6.5f, 6.5f, 6.5f,
       6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f});
}

// ---- FakeQuantWithMinMaxVarsPerChannelGradient ----

TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedDown_ZeroMinAndMax) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient")
                   .Attr("narrow_range", false)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  // Random incoming gradient; input, min and max all zero, so the whole
  // gradient flows to the input and the min/max gradients are zero.
  AddRandomInput(TensorShape({4}));
  AddInputFromArray<float>(TensorShape({4}), {0.0, 0.0, 0.0, 0.0f});
  AddInputFromArray<float>(TensorShape({4}), {0.0, 0.0, 0.0, 0.0f});
  AddInputFromArray<float>(TensorShape({4}), {0.0, 0.0, 0.0, 0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output_bprop_wrt_input = GetOutput(0);
  Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4}));
  auto grad_flat = GetInput(0).flat<float>();
  FillValues<float>(&expected_bprop_wrt_input,
                    {grad_flat(0), grad_flat(1), grad_flat(2), grad_flat(3)});
  ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input);
  Tensor* output_bprop_wrt_min = GetOutput(1);
  Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4}));
  FillValues<float>(&expected_bprop_wrt_min, {0.0f, 0.0f, 0.0f, 0.0f});
  ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min);
  Tensor* output_bprop_wrt_max = GetOutput(2);
  Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4}));
  FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, 0.0f});
  ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max);
}

TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedDown_RegularRange) {
  TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient")
                   .Attr("narrow_range", false)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddRandomInput(TensorShape({4}));
  AddInputFromArray<float>(TensorShape({4}), {-0.1f, 0.0f, 63.75f, 63.8f});
  AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f});
  AddInputFromArray<float>(TensorShape({4}), {63.65f, 63.65f, 63.65f, 63.65f});
  TF_ASSERT_OK(RunOpKernel());
Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedDown_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.1f, 0.0f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({4}), {63.4f, 63.4f, 63.4f, 63.4f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); 
ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedUp_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.3f, -0.25f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f}); AddInputFromArray<float>(TensorShape({4}), {63.625f, 63.625f, 63.625f, 63.625f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedUp_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) 
.Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.3f, -0.25f, 63.25f, 63.3f}); AddInputFromArray<float>(TensorShape({4}), {-0.125f, -0.125f, -0.125f, -0.125f}); AddInputFromArray<float>(TensorShape({4}), {63.375f, 63.375f, 63.375f, 63.375f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedDown_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.25f, 63.75f, 63.8f}); AddInputFromArray<float>(TensorShape({3}), {-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({3}), {63.65f, 63.65f, 63.65f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor 
expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedDown_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.25f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({3}), {-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({3}), {63.4f, 63.4f, 63.4f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); 
ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedUp_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.3f, -0.25f, -0.2f, 0.0f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({3}), {-0.125f, -0.125f, -0.125f}); AddInputFromArray<float>(TensorShape({3}), {63.625f, 63.625f, 63.625f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedUp_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) 
.Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.3f, -0.25f, -0.2f, 0.0f, 63.25f, 63.3f}); AddInputFromArray<float>(TensorShape({3}), {-0.125f, -0.125f, -0.125f}); AddInputFromArray<float>(TensorShape({3}), {63.375f, 63.375f, 63.375f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedDown_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.1f, 0.0f, 63.75f, 63.8f, -0.1f, 0.0f, 63.75f, 63.8f, -0.1f, 0.0f, 63.75f, 63.8f, -0.1f, 0.0f, 63.75f, 63.8f, -0.1f, 0.0f, 63.75f, 63.8f, -0.1f, 0.0f, 63.75f, 63.8f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f}); 
AddInputFromArray<float>(TensorShape({4}), {63.65f, 63.65f, 63.65f, 63.65f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedDown_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.1f, 0.0f, 63.5f, 63.6f, -0.1f, 0.0f, 63.5f, 63.6f, -0.1f, 0.0f, 63.5f, 63.6f, -0.1f, 0.0f, 63.5f, 63.6f, -0.1f, 0.0f, 63.5f, 63.6f, -0.1f, 0.0f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f}); 
AddInputFromArray<float>(TensorShape({4}), {63.4f, 63.4f, 63.4f, 63.4f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedUp_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.3f, -0.25f, 63.5f, 63.6f, -0.3f, -0.25f, 63.5f, 63.6f, -0.3f, -0.25f, 63.5f, 63.6f, -0.3f, -0.25f, 63.5f, 63.6f, -0.3f, -0.25f, 63.5f, 63.6f, -0.3f, -0.25f, 63.5f, 63.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.125f, -0.125f, -0.125f, 
-0.125f}); AddInputFromArray<float>(TensorShape({4}), {63.625f, 63.625f, 63.625f, 63.625f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedUp_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), { -0.3f, -0.25f, 63.25f, 63.3f, -0.3f, -0.25f, 63.25f, 63.3f, -0.3f, -0.25f, 63.25f, 63.3f, -0.3f, -0.25f, 63.25f, 63.3f, -0.3f, -0.25f, 63.25f, 63.3f, -0.3f, -0.25f, 63.25f, 63.3f}); AddInputFromArray<float>(TensorShape({4}), {-0.125f, 
-0.125f, -0.125f, -0.125f}); AddInputFromArray<float>(TensorShape({4}), {63.375f, 63.375f, 63.375f, 63.375f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedDown_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.1f, 0.0f, 7.5f, 7.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({4}), {7.4f, 7.4f, 7.4f, 7.4f}); TF_ASSERT_OK(RunOpKernel()); 
Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedDown_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.1f, 0.0f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, -0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({4}), {6.9f, 6.9f, 6.9f, 6.9f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); 
ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedUp_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.6f, -0.5f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({4}), {7.1f, 7.1f, 7.1f, 7.1f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim1GradientNudgedUp_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", 
true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({4})); AddInputFromArray<float>(TensorShape({4}), {-0.6f, -0.5f, 6.5f, 6.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.4f, -0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({4}), {6.6f, 6.6f, 6.6f, 6.6f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedDown_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.5f, 7.5f, 7.6f}); AddInputFromArray<float>(TensorShape({3}), {-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({3}), {7.4f, 7.4f, 7.4f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor 
expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedDown_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.1f, 0.0f, 0.1f, 0.5f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({3}), {-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({3}), {6.9f, 6.9f, 6.9f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); 
ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedUp_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.6f, -0.5f, -0.4f, 0.0f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({3}), {-0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({3}), {7.1f, 7.1f, 7.1f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim2GradientNudgedUp_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) 
.Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({2, 3})); AddInputFromArray<float>(TensorShape({2, 3}), {-0.6f, -0.5f, -0.4f, 0.0f, 6.5f, 6.6f}); AddInputFromArray<float>(TensorShape({3}), {-0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({3}), {6.6f, 6.6f, 6.6f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({2, 3})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>( &expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), grad_flat(3), grad_flat(4), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0), 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({3})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, grad_flat(5)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedDown_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.1f, 0.0f, 7.5f, 7.6f, -0.1f, 0.0f, 7.5f, 7.6f, -0.1f, 0.0f, 7.5f, 7.6f, -0.1f, 0.0f, 7.5f, 7.6f, -0.1f, 0.0f, 7.5f, 7.6f, -0.1f, 0.0f, 7.5f, 7.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, 
-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({4}), {7.4f, 7.4f, 7.4f, 7.4f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedDown_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.1f, 0.0f, 7.0f, 7.1f, -0.1f, 0.0f, 7.0f, 7.1f, -0.1f, 0.0f, 7.0f, 7.1f, -0.1f, 0.0f, 7.0f, 7.1f, -0.1f, 0.0f, 7.0f, 7.1f, -0.1f, 0.0f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({4}), {-0.1f, 
-0.1f, -0.1f, -0.1f}); AddInputFromArray<float>(TensorShape({4}), {6.9f, 6.9f, 6.9f, 6.9f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedUp_4Bits_RegularRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", false) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.6f, -0.5f, 7.0f, 7.1f, -0.6f, -0.5f, 7.0f, 7.1f, -0.6f, -0.5f, 7.0f, 7.1f, -0.6f, -0.5f, 7.0f, 7.1f, -0.6f, -0.5f, 7.0f, 7.1f, -0.6f, -0.5f, 7.0f, 7.1f}); AddInputFromArray<float>(TensorShape({4}), {-0.4f, 
-0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({4}), {7.1f, 7.1f, 7.1f, 7.1f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } TEST_F(QuantOpsTest, WithVarsPerChannelDim4GradientNudgedUp_4Bits_NarrowRange) { TF_EXPECT_OK(NodeDefBuilder("op", "FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits", 4) .Attr("narrow_range", true) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddRandomInput(TensorShape({1, 2, 3, 4})); AddInputFromArray<float>(TensorShape({1, 2, 3, 4}), {-0.6f, -0.5f, 6.5f, 6.6f, -0.6f, -0.5f, 6.5f, 6.6f, -0.6f, -0.5f, 6.5f, 6.6f, -0.6f, -0.5f, 6.5f, 6.6f, -0.6f, -0.5f, 6.5f, 6.6f, -0.6f, -0.5f, 6.5f, 6.6f}); AddInputFromArray<float>(TensorShape({4}), {-0.4f, 
-0.4f, -0.4f, -0.4f}); AddInputFromArray<float>(TensorShape({4}), {6.6f, 6.6f, 6.6f, 6.6f}); TF_ASSERT_OK(RunOpKernel()); Tensor* output_bprop_wrt_input = GetOutput(0); Tensor expected_bprop_wrt_input(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); auto grad_flat = GetInput(0).flat<float>(); FillValues<float>(&expected_bprop_wrt_input, {0.0f, grad_flat(1), grad_flat(2), 0.0f, 0.0f, grad_flat(5), grad_flat(6), 0.0f, 0.0f, grad_flat(9), grad_flat(10), 0.0f, 0.0f, grad_flat(13), grad_flat(14), 0.0f, 0.0f, grad_flat(17), grad_flat(18), 0.0f, 0.0f, grad_flat(21), grad_flat(22), 0.0f}); ExpectClose(expected_bprop_wrt_input, *output_bprop_wrt_input); Tensor* output_bprop_wrt_min = GetOutput(1); Tensor expected_bprop_wrt_min(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_min, {grad_flat(0) + grad_flat(4) + grad_flat(8) + grad_flat(12) + grad_flat(16) + grad_flat(20), 0.0f, 0.0f, 0.0f}); ExpectClose(expected_bprop_wrt_min, *output_bprop_wrt_min); Tensor* output_bprop_wrt_max = GetOutput(2); Tensor expected_bprop_wrt_max(allocator(), DT_FLOAT, TensorShape({4})); FillValues<float>(&expected_bprop_wrt_max, {0.0f, 0.0f, 0.0f, grad_flat(3) + grad_flat(7) + grad_flat(11) + grad_flat(15) + grad_flat(19) + grad_flat(23)}); ExpectClose(expected_bprop_wrt_max, *output_bprop_wrt_max); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fake_quant_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fake_quant_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
46d571f3-161a-4386-84a9-90482896c9ef
cpp
tensorflow/tensorflow
sparse_add_op
tensorflow/core/kernels/sparse_add_op.cc
tensorflow/core/kernels/sparse_add_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { template <typename T, typename Treal> class SparseAddOp : public OpKernel { public: explicit SparseAddOp(OpKernelConstruction *ctx) : OpKernel(ctx) {} void Compute(OpKernelContext *ctx) override { const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape, *b_shape, *thresh_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()) && TensorShapeUtils::IsMatrix(b_indices->shape()), errors::InvalidArgument( "Input indices should be matrices but received shapes: ", a_indices->shape().DebugString(), " and ", b_indices->shape().DebugString())); const int64_t a_nnz = a_indices->dim_size(0); const int64_t b_nnz = b_indices->dim_size(0); const int num_dims = a_indices->dim_size(1); OP_REQUIRES(ctx, b_indices->dim_size(1) == num_dims, errors::InvalidArgument( "Input indices must have the same dimension, got ", num_dims, " and ", b_indices->dim_size(1))); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Input values should be vectors but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); auto a_values = ctx->input(1).vec<T>(); auto b_values = ctx->input(4).vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input 
values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()) && TensorShapeUtils::IsVector(b_shape->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); OP_REQUIRES( ctx, a_shape->NumElements() == num_dims, errors::InvalidArgument("Second dimension of a_indices and length of " "a_shape must match, got ", num_dims, " and ", a_shape->NumElements())); OP_REQUIRES(ctx, num_dims > 0, errors::InvalidArgument("Tesors must not be empty")); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10))); const auto a_shape_flat = a_shape->flat<int64_t>(); const auto b_shape_flat = b_shape->flat<int64_t>(); for (int i = 0; i < a_shape->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i), errors::InvalidArgument( "Operands' shapes do not match: got ", a_shape_flat(i), " and ", b_shape_flat(i), " for dimension ", i)); } OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()), errors::InvalidArgument( "The magnitude threshold must be a scalar: got shape ", thresh_t->shape().DebugString())); const Treal thresh = thresh_t->scalar<Treal>()(); auto a_indices_mat = a_indices->matrix<int64_t>(); auto b_indices_mat = b_indices->matrix<int64_t>(); std::vector<std::pair<bool, int64>> entries_to_copy; entries_to_copy.reserve(a_nnz + b_nnz); std::vector<T> out_values; int64_t i = 0, j = 0; T s; while (i < a_nnz && j < b_nnz) { switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j, num_dims)) { case -1: entries_to_copy.emplace_back(true, i); out_values.push_back(a_values(i)); ++i; break; 
case 0: s = a_values(i) + b_values(j); if (thresh <= std::abs(s)) { entries_to_copy.emplace_back(true, i); out_values.push_back(s); } ++i; ++j; break; case 1: entries_to_copy.emplace_back(false, j); out_values.push_back(b_values(j)); ++j; break; } } #define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \ while (IDX < A_OR_B##_nnz) { \ entries_to_copy.emplace_back(IS_A, IDX); \ out_values.push_back(A_OR_B##_values(IDX)); \ ++IDX; \ } HANDLE_LEFTOVERS(a, i, true); HANDLE_LEFTOVERS(b, j, false); #undef HANDLE_LEFTOVERS const int64_t sum_nnz = out_values.size(); Tensor *out_indices_t, *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &out_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t)); auto out_indices_mat = out_indices_t->matrix<int64_t>(); auto out_values_flat = out_values_t->vec<T>(); for (i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64_t idx = entries_to_copy[i].second; out_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } if (sum_nnz > 0) { std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0)); } ctx->set_output(2, *a_shape); } }; #define REGISTER_KERNELS(type, thresh_type) \ REGISTER_KERNEL_BUILDER( \ Name("SparseAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ SparseAddOp<type, thresh_type>) REGISTER_KERNELS(float, float); REGISTER_KERNELS(double, double); REGISTER_KERNELS(int64_t, int64); REGISTER_KERNELS(int32, int32); REGISTER_KERNELS(int16, int16); REGISTER_KERNELS(int8, int8); REGISTER_KERNELS(complex64, float); REGISTER_KERNELS(complex128, double); #undef REGISTER_KERNELS }
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class SparseAddOpTest : public OpsTestBase { protected: template <typename T> void MakeOp() { DataType value_type = tensorflow::DataTypeToEnum<T>::value; DataType thresh_type = value_type; if (std::is_same<T, std::complex<float>>::value) { thresh_type = DT_FLOAT; } else if (std::is_same<T, std::complex<double>>::value) { thresh_type = DT_DOUBLE; } TF_ASSERT_OK(NodeDefBuilder("sparseadd", "SparseAdd") .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Input(FakeInput(DT_INT64)) .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Input(FakeInput(DT_INT64)) .Input(FakeInput(thresh_type)) .Attr("Treal", thresh_type) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SparseAddOpTest, TwoD_AddSparseTensorWithSelf) { MakeOp<float>(); const auto indices_shape = TensorShape({4, 2}); std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; const absl::Span<const int64_t> indices(in); std::initializer_list<int64_t> sh{3, 2}; const absl::Span<const int64_t> shape(sh); #define ADD_TENSOR_INPUT() \ AddInputFromArray<int64_t>(indices_shape, indices); \ AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); \ AddInputFromArray<int64_t>(TensorShape({2}), shape); ADD_TENSOR_INPUT(); ADD_TENSOR_INPUT(); AddInputFromArray<float>(TensorShape({}), {0.0}); #undef ADD_TENSOR_INPUT TF_ASSERT_OK(RunOpKernel()); Tensor expected_indices(allocator(), DT_INT64, indices_shape); test::FillValues<int64_t>(&expected_indices, indices); 
test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0)); Tensor expected_values(allocator(), DT_FLOAT, {4}); test::FillValues<float>(&expected_values, {2, 4, 6, 8}); test::ExpectTensorEqual<float>(expected_values, *GetOutput(1)); Tensor expected_shape(allocator(), DT_INT64, {static_cast<int64_t>(shape.size())}); test::FillValues<int64_t>(&expected_shape, shape); test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2)); } #define RUN_TEST(VALTYPE) \ TEST_F(SparseAddOpTest, TwoD_AddSparseTensorsWithDiffIndices_##VALTYPE) { \ MakeOp<VALTYPE>(); \ DataType val_dtype = tensorflow::DataTypeToEnum<VALTYPE>::value; \ \ const auto indices_shape = TensorShape({4, 2}); \ std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; \ const gtl::ArraySlice<int64_t> indices(in); \ std::initializer_list<int64_t> sh{3, 2}; \ const gtl::ArraySlice<int64_t> shape(sh); \ \ AddInputFromArray<int64_t>(indices_shape, indices); \ AddInputFromArray<VALTYPE>(TensorShape({4}), {1, 2, 3, 4}); \ AddInputFromArray<int64_t>(TensorShape({2}), shape); \ \ AddInputFromArray<int64_t>(TensorShape({2, 2}), {0, 0, 1, 1}); \ AddInputFromArray<VALTYPE>(TensorShape({2}), {5, 6}); \ AddInputFromArray<int64_t>(TensorShape({2}), shape); \ \ if (val_dtype == DT_COMPLEX64) { \ AddInputFromArray<float>(TensorShape({}), {0}); \ } else if (val_dtype == DT_COMPLEX128) { \ AddInputFromArray<double>(TensorShape({}), {0}); \ } else { \ AddInputFromArray<VALTYPE>(TensorShape({}), {0}); \ } \ \ TF_ASSERT_OK(RunOpKernel()); \ \ const int expected_nnz = 6; \ Tensor expected_indices(allocator(), DT_INT64, \ TensorShape({expected_nnz, 2})); \ test::FillValues<int64_t>(&expected_indices, \ {0, 0, 0, 1, 1, 0, 1, 1, 2, 0, 2, 1}); \ test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0)); \ \ Tensor expected_values(allocator(), val_dtype, {expected_nnz}); \ test::FillValues<VALTYPE>(&expected_values, {5, 1, 2, 6, 3, 4}); \ test::ExpectTensorEqual<VALTYPE>(expected_values, *GetOutput(1)); \ \ Tensor 
expected_shape(allocator(), DT_INT64, \ {static_cast<int64_t>(shape.size())}); \ test::FillValues<int64_t>(&expected_shape, shape); \ test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2)); \ } RUN_TEST(int64_t); RUN_TEST(float); RUN_TEST(double); RUN_TEST(complex64); RUN_TEST(complex128); #undef RUN_TEST #define RUN_TEST(VALTYPE, THRESH) \ TEST_F(SparseAddOpTest, TwoD_SmallValuesShouldVanish_##VALTYPE) { \ MakeOp<VALTYPE>(); \ DataType val_dtype = tensorflow::DataTypeToEnum<VALTYPE>::value; \ const auto indices_shape = TensorShape({4, 2}); \ std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; \ const gtl::ArraySlice<int64_t> indices(in); \ std::initializer_list<int64_t> sh{3, 2}; \ const gtl::ArraySlice<int64_t> shape(sh); \ \ auto AddSparseTensor = [indices, indices_shape, shape, \ this](bool negate) { \ AddInputFromArray<int64_t>(indices_shape, indices); \ if (!negate) { \ AddInputFromArray<VALTYPE>(TensorShape({4}), {1, 2, 3, 4}); \ } else { \ AddInputFromArray<VALTYPE>(TensorShape({4}), {-1, -2, -3, -4}); \ } \ AddInputFromArray<int64_t>(TensorShape({2}), shape); \ }; \ AddSparseTensor(false); \ AddSparseTensor(true); \ if (val_dtype == DT_COMPLEX64) { \ AddInputFromArray<float>(TensorShape({}), {THRESH}); \ } else if (val_dtype == DT_COMPLEX128) { \ AddInputFromArray<double>(TensorShape({}), {THRESH}); \ } else { \ AddInputFromArray<VALTYPE>(TensorShape({}), {THRESH}); \ } \ \ TF_ASSERT_OK(RunOpKernel()); \ \ Tensor expected_indices(allocator(), DT_INT64, TensorShape({0, 2})); \ test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0)); \ \ Tensor expected_values(allocator(), val_dtype, TensorShape({0})); \ test::ExpectTensorEqual<VALTYPE>(expected_values, *GetOutput(1)); \ \ Tensor expected_shape(allocator(), DT_INT64, \ {static_cast<int64_t>(shape.size())}); \ test::FillValues<int64_t>(&expected_shape, shape); \ test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2)); \ } RUN_TEST(int64_t, 1); RUN_TEST(float, 1e-3f); 
RUN_TEST(double, 1e-3f); RUN_TEST(complex64, 1e-3f); RUN_TEST(complex128, 1e-3f); #undef RUN_TEST } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_add_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_add_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
91d2793d-e918-49f9-b4b2-ff37091c2a24
cpp
tensorflow/tensorflow
quantize_and_dequantize_op
tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc
tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
#include <cstddef> #include <vector> #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/math.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/builder/xla_computation.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { enum QuantizerRoundMode { ROUND_HALF_UP, ROUND_HALF_TO_EVEN, }; class QuantizeAndDequantizeOp : public XlaOpKernel { public: explicit QuantizeAndDequantizeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_)); round_mode_ = ROUND_HALF_TO_EVEN; } void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp input = ctx->Input(0); const DataType data_type = ctx->input_type(0); xla::PrimitiveType xla_type; OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(data_type, &xla_type)); xla::XlaBuilder* b = ctx->builder(); xla::XlaOp min_range, max_range; if (range_given_) { min_range = ctx->Input(1); max_range = ctx->Input(2); } else { const xla::XlaComputation* fmax = ctx->GetOrCreateMax(data_type); const xla::XlaComputation* fmin = ctx->GetOrCreateMin(data_type); if (axis_ == -1) { min_range = ReduceAll(input, xla::MaxValue(b, xla_type), *fmin); max_range = ReduceAll(input, xla::MinValue(b, xla_type), 
*fmax); } else { std::vector<int64_t> dimensions_to_reduce; TensorShape input_shape = ctx->InputShape(0); int64_t input_rank = input_shape.dims(); OP_REQUIRES(ctx, input_rank >= 1, errors::Unimplemented("QuantizeAndDequantizeOp with axis " "!= -1 requires minimum rank 1")); OP_REQUIRES( ctx, axis_ >= 0 && axis_ < input_rank, errors::Unimplemented("QuantizeAndDequantizeOp with invalid axis")); dimensions_to_reduce.reserve(input_rank - 1); for (int64_t i = 0; i < input_rank; ++i) { if (i != axis_) { dimensions_to_reduce.push_back(i); } } min_range = Reduce(input, xla::MaxValue(b, xla_type), *fmin, dimensions_to_reduce); max_range = Reduce(input, xla::MinValue(b, xla_type), *fmax, dimensions_to_reduce); } } xla::XlaOp num_bits; if (num_bits_ < 0) { OP_REQUIRES( ctx, ctx->num_inputs() == 4, errors::Internal("Expected 4 inputs to QuantizeAndDequantize")); num_bits = ctx->Input(3); } else { num_bits = xla::ConstantR0<int32>(b, num_bits_); } const xla::XlaOp zero = XlaHelpers::Zero(b, data_type); const xla::XlaOp one = XlaHelpers::One(b, data_type); const xla::XlaOp two = XlaHelpers::FloatLiteral(b, data_type, 2.0); const xla::XlaOp half = XlaHelpers::FloatLiteral(b, data_type, 0.5); xla::XlaOp min_quantized, max_quantized; if (signed_input_) { if (narrow_range_) { min_quantized = -Pow(two, ConvertElementType( num_bits - xla::ConstantR0<int32>(b, 1), xla_type)) + one; } else { min_quantized = -Pow(two, ConvertElementType( num_bits - xla::ConstantR0<int32>(b, 1), xla_type)); } max_quantized = Pow(two, ConvertElementType(num_bits - xla::ConstantR0<int32>(b, 1), xla_type)) - one; } else { min_quantized = zero; max_quantized = Pow(two, ConvertElementType(num_bits, xla_type)) - one; } xla::XlaOp scale_from_min_side = Select(Gt(min_quantized * min_range, zero), min_quantized / min_range, xla::MaxFiniteValue(b, xla_type)); xla::XlaOp scale_from_max_side = Select(Gt(max_quantized * max_range, zero), max_quantized / max_range, xla::MaxFiniteValue(b, xla_type)); xla::XlaOp cond = 
Lt(scale_from_min_side, scale_from_max_side); xla::XlaOp scale = Select(cond, scale_from_min_side, scale_from_max_side); xla::XlaOp inverse_scale = Select(cond, min_range / min_quantized, max_range / max_quantized); min_range = Select(cond, min_range, min_quantized * inverse_scale); max_range = Select(cond, max_quantized * inverse_scale, max_range); xla::Shape axis_shape = b->GetShape(min_range).value(); if (!xla::ShapeUtil::IsScalar(axis_shape)) { xla::Shape input_shape = b->GetShape(input).value(); absl::Span<const int64_t> input_dimensions = input_shape.dimensions(); auto convert_to_input_shape = [&](const xla::XlaOp op) { return xla::BroadcastInDim(op, input_dimensions, {axis_}); }; min_range = convert_to_input_shape(min_range); max_range = convert_to_input_shape(max_range); scale = convert_to_input_shape(scale); inverse_scale = convert_to_input_shape(inverse_scale); } if (range_given_) { input = Clamp(min_range, input, max_range); } xla::XlaOp result; switch (round_mode_) { case ROUND_HALF_TO_EVEN: { result = xla::RoundToEven(input * scale) * inverse_scale; break; } case ROUND_HALF_UP: { result = Floor(input * scale + half) * inverse_scale; break; } } ctx->SetOutput(0, result); } protected: int64_t num_bits_ = -1; int axis_; bool signed_input_; bool range_given_; bool narrow_range_; QuantizerRoundMode round_mode_; }; class QuantizeAndDequantizeV2Op : public QuantizeAndDequantizeOp { public: explicit QuantizeAndDequantizeV2Op(OpKernelConstruction* ctx) : QuantizeAndDequantizeOp(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_)); OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 
62 : 63), errors::InvalidArgument("num_bits is out of range: ", num_bits_, " with signed_input_ ", signed_input_)); string round_mode_string; OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string)); OP_REQUIRES( ctx, (round_mode_string == "HALF_UP" || round_mode_string == "HALF_TO_EVEN"), errors::InvalidArgument("Round mode string must be " "'HALF_UP' or " "'HALF_TO_EVEN', is '" + round_mode_string + "'")); if (round_mode_string == "HALF_UP") { round_mode_ = ROUND_HALF_UP; } else if (round_mode_string == "HALF_TO_EVEN") { round_mode_ = ROUND_HALF_TO_EVEN; } } }; REGISTER_XLA_OP(Name("QuantizeAndDequantizeV2"), QuantizeAndDequantizeV2Op); REGISTER_XLA_OP(Name("QuantizeAndDequantizeV3"), QuantizeAndDequantizeOp); REGISTER_XLA_OP(Name("QuantizeAndDequantizeV4"), QuantizeAndDequantizeV2Op); } }
#include <functional> #include <memory> #include <vector> #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { using ::tensorflow::testing::StatusIs; using ::testing::MatchesRegex; class QuantizeAndDequantizeTest : public OpsTestBase {}; struct ParameterizedQuantizeAndDequantizeTest : public OpsTestBase, public ::testing::WithParamInterface<int> {}; TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 8) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1}), {-3.5}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1})); test::FillValues<float>(&expected, {-3.5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor_V3) { 
TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", true) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1}), {-3.5}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1})); test::FillValues<float>(&expected, {-3.5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } template <typename T> std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis, const std::vector<T>& data) { uint32 seed = 123; int64_t out_size = 1; for (int dim : dims) { out_size *= dim; } int minor_size = 1; for (int i = axis + 1; i < dims.size(); ++i) { minor_size *= dims[i]; } std::vector<T> out(out_size); int num_slices = (axis == -1) ? 
1 : dims[axis]; for (int out_idx = 0; out_idx < out_size; ++out_idx) { int in_idx = rand_r(&seed) % data.size(); int multiplier = ((out_idx / minor_size) % num_slices) + 1; out[out_idx] = data[in_idx] * multiplier; } return out; } TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8) { const int axis = GetParam(); TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 8) .Attr("range_given", false) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625})); const int num_slices = (axis == -1) ? 1 : dims[axis]; const TensorShape range_shape = (axis == -1) ? TensorShape({}) : TensorShape({num_slices}); std::vector<float> init_value(num_slices, 0.0f); AddInputFromArray<float>(range_shape, init_value); AddInputFromArray<float>(range_shape, init_value); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>( &expected, ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5})); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0); EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0); } } TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8_round_half_up) { const int axis = GetParam(); TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 8) .Attr("range_given", false) .Attr("round_mode", "HALF_UP") 
.Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {5, 7, 11, 13}; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625})); const int num_slices = (axis == -1) ? 1 : dims[axis]; const TensorShape range_shape = (axis == -1) ? TensorShape({}) : TensorShape({num_slices}); std::vector<float> init_value(num_slices, 0.0f); AddInputFromArray<float>(range_shape, init_value); AddInputFromArray<float>(range_shape, init_value); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>(&expected, ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 65.0 / 128})); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0); EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0); } } TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8_round_half_up_narrow_range) { const int axis = GetParam(); TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 8) .Attr("range_given", false) .Attr("round_mode", "HALF_UP") .Attr("narrow_range", true) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625})); const int num_slices = (axis == -1) ? 1 : dims[axis]; const TensorShape range_shape = (axis == -1) ? 
TensorShape({}) : TensorShape({num_slices}); std::vector<float> init_value(num_slices, 0.0f); AddInputFromArray<float>(range_shape, init_value); AddInputFromArray<float>(range_shape, init_value); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>( &expected, ScalePerSliceAlongAxis<float>(dims, axis, {-1, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127, 64.0 / 127})); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0); EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0); } } TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", true) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({6})); test::FillValues<float>(&expected, {-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8_narrow_range_V3) { const int axis = GetParam(); TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", true) .Attr("range_given", false) .Attr("narrow_range", 
true) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625})); const int num_slices = (axis == -1) ? 1 : dims[axis]; const TensorShape range_shape = (axis == -1) ? TensorShape({}) : TensorShape({num_slices}); std::vector<float> init_value(num_slices, 0.0f); AddInputFromArray<float>(range_shape, init_value); AddInputFromArray<float>(range_shape, init_value); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>( &expected, ScalePerSliceAlongAxis<float>(dims, axis, {-1, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127, 64.0 / 127})); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0); EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0); } } TEST_P(ParameterizedQuantizeAndDequantizeTest, GradientV4_op) { const int axis = GetParam(); TF_ASSERT_OK(NodeDefBuilder("qdq_v4_grad_op", "QuantizeAndDequantizeV4Grad") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; auto gradients = ScalePerSliceAlongAxis<float>( dims, axis, {1, -2, -3, 4, 5, 6, -7, -8, -9, -10, 11}); AddInputFromArray<float>(TensorShape(dims), gradients); auto inputs = ScalePerSliceAlongAxis<float>( dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.55, 0.6}); AddInputFromArray<float>(TensorShape(dims), inputs); const int num_slices = (axis == -1) ? 1 : dims[axis]; const TensorShape range_shape = (axis == -1) ? 
TensorShape({}) : TensorShape({num_slices}); std::vector<float> input_min_values(num_slices), input_max_values(num_slices); for (int i = 0; i < num_slices; ++i) { input_max_values[i] = 0.8f + i * 0.4f; input_min_values[i] = -input_max_values[i]; } AddInputFromArray<float>(range_shape, input_min_values); AddInputFromArray<float>(range_shape, input_max_values); std::vector<float> expected_vals(inputs.size()); int minor_size = 1; for (int i = axis + 1; i < dims.size(); ++i) { minor_size *= dims[i]; } for (int i = 0; i < inputs.size(); ++i) { int slice_idx = (i / minor_size) % num_slices; expected_vals[i] = ((inputs[i] >= input_min_values[slice_idx]) && (inputs[i] <= input_max_values[slice_idx])) ? gradients[i] : 0; } TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>(&expected, expected_vals); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } INSTANTIATE_TEST_SUITE_P(All, ParameterizedQuantizeAndDequantizeTest, ::testing::Values(-1, 1, 3)); TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 4) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({6})); test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_round_half_up) { TF_ASSERT_OK( 
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 4) .Attr("range_given", false) .Attr("round_mode", "HALF_UP") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({6})); test::FillValues<float>(&expected, {-1, -0.5, 0, 0.375, 0.75, 0.5}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", true) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({6})); test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0); EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0); } TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) 
.Attr("num_bits", 8) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 4}), {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33}); AddInputFromArray<float>(TensorShape({}), {-1.0}); AddInputFromArray<float>(TensorShape({}), {1.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4})); test::FillValues<float>( &expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127, -128.0 / 127, 1}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_round_half_up) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", true) .Attr("num_bits", 8) .Attr("range_given", true) .Attr("round_mode", "HALF_UP") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 4}), {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33}); AddInputFromArray<float>(TensorShape({}), {-1.0}); AddInputFromArray<float>(TensorShape({}), {1.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4})); test::FillValues<float>( &expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127, -128.0 / 127, 1}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", true) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 4}), {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33}); AddInputFromArray<float>(TensorShape({}), {-1.0}); 
AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4})); test::FillValues<float>( &expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127, -128.0 / 127, 1}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", false) .Attr("num_bits", 8) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {1.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1})); test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_round_half_up) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", false) .Attr("num_bits", 8) .Attr("range_given", true) .Attr("round_mode", "HALF_UP") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {1.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1})); test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } 
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", false) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1})); test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("signed_input", false) .Attr("num_bits", 8) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1})); test::FillValues<float>(&expected, {0, 0, 0, 0}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("signed_input", false) .Attr("range_given", false) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 
0, 0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {8}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1})); test::FillValues<float>(&expected, {0, 0, 0, 0}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5); } TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("num_bits", 8) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "Invalid range: input_min 1 > input_max 0")) << s; } TEST_F(QuantizeAndDequantizeTest, Invalid_range_given_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("range_given", true) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {8}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "Invalid range: input_min 1 > input_max 0")) << s; } TEST_F(QuantizeAndDequantizeTest, Invalid_axis_given_V3) { TF_ASSERT_OK( NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("range_given", false) .Attr("axis", static_cast<int32_t>(-2147483648)) 
.Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<float>(TensorShape({}), {0.0}); AddInputFromArray<int32>(TensorShape({}), {8}); EXPECT_THAT( RunOpKernel(), StatusIs( error::INVALID_ARGUMENT, MatchesRegex("Axis requested is larger than input dimensions.*"))); } #define BM_SIMPLE_QUAN_DEQUAN(DEVICE) \ static void BM_SIMPLE_QUAN_DEQUAN_##DEVICE( \ ::testing::benchmark::State& state) { \ auto root = Scope::NewRootScope().ExitOnError(); \ ops::QuantizeAndDequantizeV2(root, -3.5, -3.5, -3.5); \ TF_CHECK_OK(root.status()); \ Graph* g = new Graph(OpRegistry::Global()); \ TF_CHECK_OK(root.ToGraph(g)); \ test::Benchmark(#DEVICE, g, false).Run(state); \ } \ BENCHMARK(BM_SIMPLE_QUAN_DEQUAN_##DEVICE); BM_SIMPLE_QUAN_DEQUAN(cpu); BM_SIMPLE_QUAN_DEQUAN(gpu); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4546d4dd-31ac-484a-90ac-7d1093f72fb1
cpp
tensorflow/tensorflow
sdca_ops
tensorflow/core/ops/sdca_ops.cc
tensorflow/core/kernels/sdca_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; using shape_inference::ShapeHandle; static Status ApplySdcaOptimizerShapeFn(InferenceContext* c) { std::vector<ShapeHandle> sparse_handles; if (c->input("sparse_weights", &sparse_handles).ok()) { TF_RETURN_IF_ERROR( c->set_output("out_delta_sparse_weights", sparse_handles)); } std::vector<ShapeHandle> dense_handles; if (c->input("dense_weights", &dense_handles).ok()) { TF_RETURN_IF_ERROR(c->set_output("out_delta_dense_weights", dense_handles)); } return c->set_output( "out_example_state_data", {c->Matrix(InferenceContext::kUnknownDim, c->MakeDim(4))}); } REGISTER_OP("SdcaOptimizer") .Attr( "loss_type: {'logistic_loss', 'squared_loss', 'hinge_loss'," "'smooth_hinge_loss', 'poisson_loss'}") .Attr("adaptative : bool=false") .Attr("num_sparse_features: int >= 0") .Attr("num_sparse_features_with_values: int >= 0") .Attr("num_dense_features: int >= 0") .Attr("l1: float") .Attr("l2: float") .Attr("num_loss_partitions: int >= 1") .Attr("num_inner_iterations: int >= 1") .Input("sparse_example_indices: num_sparse_features * int64") .Input("sparse_feature_indices: num_sparse_features * int64") .Input("sparse_feature_values: num_sparse_features_with_values * float") .Input("dense_features: num_dense_features * float") .Input("example_weights: float") .Input("example_labels: float") .Input("sparse_indices: num_sparse_features * int64") .Input("sparse_weights: num_sparse_features * float") .Input("dense_weights: num_dense_features * float") .Input("example_state_data: float") .Output("out_example_state_data: float") .Output("out_delta_sparse_weights: num_sparse_features * float") .Output("out_delta_dense_weights: num_dense_features * float") .SetShapeFn(ApplySdcaOptimizerShapeFn); REGISTER_OP("SdcaOptimizerV2") .Attr( "loss_type: {'logistic_loss', 'squared_loss', 
'hinge_loss'," "'smooth_hinge_loss', 'poisson_loss'}") .Attr("adaptive : bool=false") .Attr("num_sparse_features: int >= 0") .Attr("num_sparse_features_with_values: int >= 0") .Attr("num_dense_features: int >= 0") .Attr("l1: float") .Attr("l2: float") .Attr("num_loss_partitions: int >= 1") .Attr("num_inner_iterations: int >= 1") .Input("sparse_example_indices: num_sparse_features * int64") .Input("sparse_feature_indices: num_sparse_features * int64") .Input("sparse_feature_values: num_sparse_features_with_values * float") .Input("dense_features: num_dense_features * float") .Input("example_weights: float") .Input("example_labels: float") .Input("sparse_indices: num_sparse_features * int64") .Input("sparse_weights: num_sparse_features * float") .Input("dense_weights: num_dense_features * float") .Input("example_state_data: float") .Output("out_example_state_data: float") .Output("out_delta_sparse_weights: num_sparse_features * float") .Output("out_delta_dense_weights: num_dense_features * float") .SetShapeFn(ApplySdcaOptimizerShapeFn); REGISTER_OP("SdcaShrinkL1") .Attr("num_features: int >= 0") .Attr("l1: float") .Attr("l2: float") .Input("weights: Ref(num_features * float)") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("SdcaFprint") .Input("input: string") .Output("output: int64") .SetShapeFn([](InferenceContext* c) { ShapeHandle handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(c->Concatenate(handle, c->Vector(2), &output_shape)); c->set_output(0, output_shape); return absl::OkStatus(); }); }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { const SessionOptions* GetSingleThreadedOptions() { static const SessionOptions* const kSessionOptions = []() { SessionOptions* const result = new SessionOptions(); result->config.set_intra_op_parallelism_threads(1); result->config.set_inter_op_parallelism_threads(1); result->config.add_session_inter_op_thread_pool()->set_num_threads(1); return result; }(); return kSessionOptions; } const SessionOptions* GetMultiThreadedOptions() { static const SessionOptions* const kSessionOptions = []() { SessionOptions* const result = new SessionOptions(); result->config.set_intra_op_parallelism_threads(0); result->config.set_inter_op_parallelism_threads(0); result->config.add_session_inter_op_thread_pool()->set_num_threads( 0); return result; }(); return kSessionOptions; } Node* Var(Graph* const g, const int n) { return test::graph::Var(g, DT_FLOAT, TensorShape({n})); } std::vector<Node*> VarVector(Graph* const g, const int nodes, const int node_size) { std::vector<Node*> result; result.reserve(nodes); for (int i = 0; i < nodes; ++i) { result.push_back(Var(g, node_size)); } return result; } Node* Zeros(Graph* const g, const TensorShape& shape) { Tensor data(DT_FLOAT, shape); data.flat<float>().setZero(); return test::graph::Constant(g, data); } Node* Zeros(Graph* const g, const int n) { return Zeros(g, TensorShape({n})); } Node* Ones(Graph* const g, const int n) { Tensor data(DT_FLOAT, TensorShape({n})); test::FillFn<float>(&data, [](const int i) { return 1.0f; }); return test::graph::Constant(g, data); } Node* SparseIndices(Graph* const 
g, const int sparse_features_per_group) { Tensor data(DT_INT64, TensorShape({sparse_features_per_group})); test::FillFn<int64_t>(&data, [&](const int i) { return i; }); return test::graph::Constant(g, data); } Node* SparseExampleIndices(Graph* const g, const int sparse_features_per_group, const int num_examples) { const int x_size = num_examples * 4; Tensor data(DT_INT64, TensorShape({x_size})); test::FillFn<int64_t>(&data, [&](const int i) { return i / 4; }); return test::graph::Constant(g, data); } Node* SparseFeatureIndices(Graph* const g, const int sparse_features_per_group, const int num_examples) { const int x_size = num_examples * 4; Tensor data(DT_INT64, TensorShape({x_size})); test::FillFn<int64_t>( &data, [&](const int i) { return i % sparse_features_per_group; }); return test::graph::Constant(g, data); } Node* RandomZeroOrOne(Graph* const g, const int n) { Tensor data(DT_FLOAT, TensorShape({n})); test::FillFn<float>(&data, [](const int i) { return (random::New64() % 2) == 0 ? 0.0f : 1.0f; }); return test::graph::Constant(g, data); } Node* RandomZeroOrOneMatrix(Graph* const g, const int n, int d) { Tensor data(DT_FLOAT, TensorShape({n, d})); test::FillFn<float>(&data, [](const int i) { return (random::New64() % 2) == 0 ? 
0.0f : 1.0f; }); return test::graph::Constant(g, data); } void GetGraphs(const int32_t num_examples, const int32_t num_sparse_feature_groups, const int32_t sparse_features_per_group, const int32_t num_dense_feature_groups, const int32_t dense_features_per_group, Graph** const init_g, Graph** train_g) { { Graph* g = new Graph(OpRegistry::Global()); std::vector<Node*> sparse_weight_nodes = VarVector(g, num_sparse_feature_groups, sparse_features_per_group); std::vector<Node*> dense_weight_nodes = VarVector(g, num_dense_feature_groups, dense_features_per_group); Node* const multi_zero = Zeros(g, sparse_features_per_group); for (Node* n : sparse_weight_nodes) { test::graph::Assign(g, n, multi_zero); } Node* const zero = Zeros(g, dense_features_per_group); for (Node* n : dense_weight_nodes) { test::graph::Assign(g, n, zero); } *init_g = g; } { Graph* g = new Graph(OpRegistry::Global()); std::vector<Node*> sparse_weight_nodes = VarVector(g, num_sparse_feature_groups, sparse_features_per_group); std::vector<Node*> dense_weight_nodes = VarVector(g, num_dense_feature_groups, dense_features_per_group); std::vector<NodeBuilder::NodeOut> sparse_indices; std::vector<NodeBuilder::NodeOut> sparse_weights; for (Node* n : sparse_weight_nodes) { sparse_indices.push_back( NodeBuilder::NodeOut(SparseIndices(g, sparse_features_per_group))); sparse_weights.push_back(NodeBuilder::NodeOut(n)); } std::vector<NodeBuilder::NodeOut> dense_weights; dense_weights.reserve(dense_weight_nodes.size()); for (Node* n : dense_weight_nodes) { dense_weights.push_back(NodeBuilder::NodeOut(n)); } std::vector<NodeBuilder::NodeOut> sparse_example_indices; std::vector<NodeBuilder::NodeOut> sparse_feature_indices; std::vector<NodeBuilder::NodeOut> sparse_values; sparse_example_indices.reserve(num_sparse_feature_groups); for (int i = 0; i < num_sparse_feature_groups; ++i) { sparse_example_indices.push_back(NodeBuilder::NodeOut( SparseExampleIndices(g, sparse_features_per_group, num_examples))); } 
sparse_feature_indices.reserve(num_sparse_feature_groups); for (int i = 0; i < num_sparse_feature_groups; ++i) { sparse_feature_indices.push_back(NodeBuilder::NodeOut( SparseFeatureIndices(g, sparse_features_per_group, num_examples))); } sparse_values.reserve(num_sparse_feature_groups); for (int i = 0; i < num_sparse_feature_groups; ++i) { sparse_values.push_back( NodeBuilder::NodeOut(RandomZeroOrOne(g, num_examples * 4))); } std::vector<NodeBuilder::NodeOut> dense_features; dense_features.reserve(num_dense_feature_groups); for (int i = 0; i < num_dense_feature_groups; ++i) { dense_features.push_back(NodeBuilder::NodeOut( RandomZeroOrOneMatrix(g, num_examples, dense_features_per_group))); } Node* const weights = Ones(g, num_examples); Node* const labels = RandomZeroOrOne(g, num_examples); Node* const example_state_data = Zeros(g, TensorShape({num_examples, 4})); Node* sdca = nullptr; TF_CHECK_OK( NodeBuilder(g->NewName("sdca"), "SdcaOptimizer") .Attr("loss_type", "logistic_loss") .Attr("num_sparse_features", num_sparse_feature_groups) .Attr("num_sparse_features_with_values", num_sparse_feature_groups) .Attr("num_dense_features", num_dense_feature_groups) .Attr("l1", 0.0) .Attr("l2", 1.0) .Attr("num_loss_partitions", 1) .Attr("num_inner_iterations", 2) .Input(sparse_example_indices) .Input(sparse_feature_indices) .Input(sparse_values) .Input(dense_features) .Input(weights) .Input(labels) .Input(sparse_indices) .Input(sparse_weights) .Input(dense_weights) .Input(example_state_data) .Finalize(g, &sdca)); *train_g = g; } } void BM_SDCA(::testing::benchmark::State& state) { const int num_examples = state.range(0); Graph* init = nullptr; Graph* train = nullptr; GetGraphs(num_examples, 20 , 5 , 1 , 20 , &init, &train); test::Benchmark("cpu", train, GetSingleThreadedOptions(), init, nullptr, "", false) .Run(state); } void BM_SDCA_LARGE_DENSE(::testing::benchmark::State& state) { const int num_examples = state.range(0); Graph* init = nullptr; Graph* train = nullptr; 
GetGraphs(num_examples, 0 , 0 , 5 , 200000 , &init, &train); test::Benchmark("cpu", train, GetSingleThreadedOptions(), init, nullptr, "", false) .Run(state); } void BM_SDCA_LARGE_SPARSE(::testing::benchmark::State& state) { const int num_examples = state.range(0); Graph* init = nullptr; Graph* train = nullptr; GetGraphs(num_examples, 65 , 1e6 , 0 , 0 , &init, &train); test::Benchmark("cpu", train, GetMultiThreadedOptions(), init, nullptr, "", false) .Run(state); } } BENCHMARK(BM_SDCA)->Arg(128)->Arg(256)->Arg(512)->Arg(1024); BENCHMARK(BM_SDCA_LARGE_DENSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024); BENCHMARK(BM_SDCA_LARGE_SPARSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sdca_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sdca_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cdf7d9a4-a9f8-4992-8a79-05ea7b25a7ab
cpp
tensorflow/tensorflow
sparse_matmul_op
tensorflow/core/kernels/sparse_matmul_op.cc
tensorflow/core/kernels/sparse_matmul_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/sparse_matmul_op.h" #include <map> #include <memory> #include <vector> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/bfloat16.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/platform/types.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "xla/tsl/framework/contraction/eigen_contraction_kernel.h" #endif #define ALWAYS_INLINE EIGEN_ALWAYS_INLINE namespace tensorflow { namespace { template <typename T> using BasicMatrix = Eigen::Tensor<T, 2, Eigen::RowMajor>; template <typename T> using BasicMatrixMap = Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Aligned>; using Matrix = BasicMatrix<float>; using MatrixMap = BasicMatrixMap<float>; using CPUDevice = Eigen::ThreadPoolDevice; using DSizes = Eigen::DSizes<Eigen::DenseIndex, 2>; inline Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<0>> dsizes_00() { return Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<0>>(); } inline Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<0>> dsizes_10() { return Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<0>>(); } const int K = 64; const int M = 64; const int N = 128; template <typename T> struct SparseSlice { using ConstMatrixMap = BasicMatrixMap<const T>; public: struct Index3 { Index3(uint8 m, uint8 k1, uint8 k2, uint8 k3) : m(m), k1(k1), k2(k2), k3(k3) {} uint8 m; uint8 k1; 
uint8 k2; uint8 k3; }; struct Index { Index(uint8 m, uint8 k) : m(m), k(k) {} uint8 m; uint8 k; }; SparseSlice(int nrows, int ncols, int bsize) : num_rows(nrows), num_cols(ncols), block_size(bsize) { DCHECK_LE(nrows, 256); DCHECK_LE(block_size, 256); } template <bool Transpose = false> void Initialize(const ConstMatrixMap& mat, int col_offset); void Clear(); std::vector<int> index3_offset; std::vector<Index3> index3; std::vector<T> data3; std::vector<int> index_offset; std::vector<Index> index; std::vector<T> data; const int num_rows; const int num_cols; const int block_size; }; template <typename T> bool IsZero(T v); template <> ALWAYS_INLINE bool IsZero(bfloat16 v) { return !static_cast<bool>(v); } template <> ALWAYS_INLINE bool IsZero(float v) { return v == 0.0f; } template <typename T> class StridedIterator { public: StridedIterator(int stride, const T* start, const T* end) : stride_(stride), k_(0), curr_(start), end_(end) {} ALWAYS_INLINE bool Done() const { return curr_ >= end_; } ALWAYS_INLINE T Value() const { return *curr_; } ALWAYS_INLINE uint8 K() const { return k_; } ALWAYS_INLINE void Next() { curr_ += stride_; ++k_; } ALWAYS_INLINE void EatZeros() { while (curr_ < end_ && IsZero<T>(*curr_)) { Next(); } } private: const int stride_; uint8 k_; const T* curr_; const T* const end_; }; template <typename T> template <bool Transpose> void SparseSlice<T>::Initialize( const typename SparseSlice<T>::ConstMatrixMap& mat, int col_offset) { const int mat_rows = Transpose ? mat.dimension(1) : mat.dimension(0); const int mat_cols = Transpose ? 
mat.dimension(0) : mat.dimension(1); DCHECK_LE(num_rows, mat_rows); DCHECK_LE(num_cols + col_offset, mat_cols); int num_blocks = (num_cols + block_size - 1) / block_size; int mat_size = num_rows * num_cols; index3_offset.reserve(num_blocks); data3.reserve(mat_size); index3.reserve(mat_size / 3); index_offset.reserve(num_blocks); data.reserve(num_blocks * num_rows * 2); index.reserve(num_blocks * num_rows * 2); const int stride = Transpose ? mat.dimension(1) : 1; for (int i = 0; i < num_blocks; ++i) { int num_block_cols = std::min(block_size, num_cols - block_size * i); for (int row = 0; row < num_rows; ++row) { const uint8 m = static_cast<uint8>(row); const auto* start = Transpose ? &mat(col_offset, row) : &mat(row, col_offset); const auto* end = start + stride * num_block_cols; StridedIterator<T> iter(stride, start, end); while (true) { iter.EatZeros(); if (iter.Done()) break; const uint8 k1 = iter.K(); const T value1 = iter.Value(); iter.Next(); iter.EatZeros(); if (iter.Done()) { data.push_back(value1); index.emplace_back(m, k1); break; } const uint8 k2 = iter.K(); const T value2 = iter.Value(); iter.Next(); iter.EatZeros(); if (iter.Done()) { data.push_back(value2); index.emplace_back(m, k2); data.push_back(value1); index.emplace_back(m, k1); break; } const uint8 k3 = iter.K(); data3.push_back(value1); data3.push_back(value2); data3.push_back(iter.Value()); iter.Next(); ; index3.emplace_back(m, k1, k2, k3); } } col_offset += block_size; index3_offset.push_back(index3.size()); index_offset.push_back(index.size()); } DCHECK_EQ(index3_offset.size(), num_blocks); DCHECK_EQ(index_offset.size(), num_blocks); DCHECK_EQ(3 * index3.size(), data3.size()); DCHECK_EQ(index.size(), data.size()); } template <typename T> void SparseSlice<T>::Clear() { index3_offset.clear(); index3.clear(); data3.clear(); index_offset.clear(); index.clear(); data.clear(); } using Packet = Eigen::internal::packet_traits<float>::type; const int kNumOperands = (sizeof(Packet) / sizeof(float)); 
#define LOAD(x) Eigen::internal::pload<Packet>(x); #define EXPAND_BFLOAT_L(x, y) \ const auto y = Eigen::internal::pexpand_bf16_l<Packet>(x); #define EXPAND_BFLOAT_U(x, y) \ const auto y = Eigen::internal::pexpand_bf16_u<Packet>(x); #define STORE(x, y) Eigen::internal::pstore<float>(x, y); #define FMA(a, b, c, d) d = Eigen::internal::pmadd<Packet>(a, b, c); ALWAYS_INLINE float ConvertBfloat16ToFloat(const bfloat16* src) { float out = 0; auto tmp = reinterpret_cast<bfloat16*>(&out); #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ tmp[0] = *src; #else tmp[1] = *src; #endif return out; } ALWAYS_INLINE Packet ConvertFourBfloat16ToFloat(const bfloat16* src) { return Eigen::internal::pload4bf16<Packet>( reinterpret_cast<const float*>(src)); } ALWAYS_INLINE Packet ConvertTwoBfloat16ToFloat(const bfloat16* src) { return Eigen::internal::pload2bf16<Packet>( reinterpret_cast<const float*>(src)); } ALWAYS_INLINE void ScalarMulAdd(const float a, const float** inp, float** out) { **out += a * **inp; ++*inp; ++*out; } ALWAYS_INLINE void ScalarMulAdd(const float a, const bfloat16** inp, float** out) { float inp_f = ConvertBfloat16ToFloat(*inp); **out += a * inp_f; ++*inp; ++*out; } ALWAYS_INLINE void ScalarMulAdd3Way(const float a1, const float a2, const float a3, const bfloat16** inp1, const bfloat16** inp2, const bfloat16** inp3, float** out) { float inp1_f = ConvertBfloat16ToFloat(*inp1); float inp2_f = ConvertBfloat16ToFloat(*inp2); float inp3_f = ConvertBfloat16ToFloat(*inp3); **out += a1 * inp1_f + a2 * inp2_f + a3 * inp3_f; ++*out; ++*inp1; ++*inp2; ++*inp3; } ALWAYS_INLINE void ScalarMulAdd3Way(const float a1, const float a2, const float a3, const float** inp1, const float** inp2, const float** inp3, float** out) { **out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3; ++*out; ++*inp1; ++*inp2; ++*inp3; } ALWAYS_INLINE void LoadSingleScalar(const bfloat16** data, Packet* l) { auto tmp = ConvertBfloat16ToFloat(*data); *l = Eigen::internal::pset1<Packet>(tmp); ++*data; } 
ALWAYS_INLINE void LoadTwoScalars(const bfloat16** data, Packet* l1, Packet* l2) { if (kNumOperands >= 2) { auto tmp = ConvertTwoBfloat16ToFloat(*data); *l1 = Eigen::internal::pbroadcast_first<Packet>(tmp); *l2 = Eigen::internal::pbroadcast_second<Packet>(tmp); *data += 2; } else { LoadSingleScalar(data, l1); LoadSingleScalar(data, l2); } } ALWAYS_INLINE void LoadFourScalars(const bfloat16** data, Packet* l1, Packet* l2, Packet* l3, Packet* l4) { if (kNumOperands >= 4) { auto tmp = ConvertFourBfloat16ToFloat(*data); *l1 = Eigen::internal::pbroadcast_first<Packet>(tmp); *l2 = Eigen::internal::pbroadcast_second<Packet>(tmp); *l3 = Eigen::internal::pbroadcast_third<Packet>(tmp); *l4 = Eigen::internal::pbroadcast_fourth<Packet>(tmp); *data += 4; } else { LoadTwoScalars(data, l1, l2); LoadTwoScalars(data, l3, l4); } } ALWAYS_INLINE void LoadSingleScalar(const float** data, Packet* l) { *l = Eigen::internal::pload1<Packet>(*data); ++(*data); } ALWAYS_INLINE void LoadTwoScalars(const float** data, Packet* l1, Packet* l2) { LoadSingleScalar(data, l1); LoadSingleScalar(data, l2); } ALWAYS_INLINE void LoadFourScalars(const float** data, Packet* l1, Packet* l2, Packet* l3, Packet* l4) { LoadTwoScalars(data, l1, l2); LoadTwoScalars(data, l3, l4); } template <typename T> ALWAYS_INLINE void LoadThreeScalars(const T** data, Packet* l1, Packet* l2, Packet* l3) { LoadTwoScalars(data, l1, l2); LoadSingleScalar(data, l3); } template <typename T> ALWAYS_INLINE void LoadSixScalars(const T** data, Packet* l1, Packet* l2, Packet* l3, Packet* l4, Packet* l5, Packet* l6) { LoadFourScalars(data, l1, l2, l3, l4); LoadTwoScalars(data, l5, l6); } ALWAYS_INLINE void MulAdd(const Packet a, const bfloat16** binp, float** out) { auto inp = reinterpret_cast<const float*>(*binp); const auto b = LOAD(inp); EXPAND_BFLOAT_L(b, b_0); EXPAND_BFLOAT_U(b, b_1); *binp += 2 * kNumOperands; auto c1 = LOAD(*out); auto c2 = LOAD(*out + kNumOperands); FMA(a, b_0, c1, c1); FMA(a, b_1, c2, c2); STORE(*out, c1); 
STORE(*out + kNumOperands, c2); *out += 2 * kNumOperands; } ALWAYS_INLINE void MulAdd3Way(const Packet a1, const Packet a2, const Packet a3, const bfloat16** binp1, const bfloat16** binp2, const bfloat16** binp3, float** out) { auto inp1 = reinterpret_cast<const float*>(*binp1); auto inp2 = reinterpret_cast<const float*>(*binp2); auto inp3 = reinterpret_cast<const float*>(*binp3); auto c1 = LOAD(*out); auto c2 = LOAD(*out + kNumOperands); const auto b1 = LOAD(inp1); EXPAND_BFLOAT_L(b1, b1_0); EXPAND_BFLOAT_U(b1, b1_1); *binp1 += 2 * kNumOperands; const auto b2 = LOAD(inp2); EXPAND_BFLOAT_L(b2, b2_0); EXPAND_BFLOAT_U(b2, b2_1); *binp2 += 2 * kNumOperands; const auto b3 = LOAD(inp3); EXPAND_BFLOAT_L(b3, b3_0); EXPAND_BFLOAT_U(b3, b3_1); *binp3 += 2 * kNumOperands; FMA(a1, b1_0, c1, c1); FMA(a1, b1_1, c2, c2); FMA(a2, b2_0, c1, c1); FMA(a2, b2_1, c2, c2); FMA(a3, b3_0, c1, c1); FMA(a3, b3_1, c2, c2); STORE(*out, c1); STORE(*out + kNumOperands, c2); *out += 2 * kNumOperands; } ALWAYS_INLINE void TwoMulAdd3Way(const Packet a1, const Packet a2, const Packet a3, const bfloat16** binp1, const bfloat16** binp2, const bfloat16** binp3, float** out) { auto inp1 = reinterpret_cast<const float*>(*binp1); auto inp2 = reinterpret_cast<const float*>(*binp2); auto inp3 = reinterpret_cast<const float*>(*binp3); auto c1 = LOAD(*out); auto c2 = LOAD(*out + kNumOperands); const auto b1 = LOAD(inp1); const auto b2 = LOAD(inp2); const auto b3 = LOAD(inp3); EXPAND_BFLOAT_L(b1, b1_0); EXPAND_BFLOAT_U(b1, b1_1); EXPAND_BFLOAT_L(b2, b2_0); EXPAND_BFLOAT_U(b2, b2_1); EXPAND_BFLOAT_L(b3, b3_0); EXPAND_BFLOAT_U(b3, b3_1); auto c3 = LOAD(*out + 2 * kNumOperands); auto c4 = LOAD(*out + 3 * kNumOperands); const auto b4 = LOAD(inp1 + kNumOperands); const auto b5 = LOAD(inp2 + kNumOperands); const auto b6 = LOAD(inp3 + kNumOperands); EXPAND_BFLOAT_L(b4, b4_0); EXPAND_BFLOAT_U(b4, b4_1); EXPAND_BFLOAT_L(b5, b5_0); EXPAND_BFLOAT_U(b5, b5_1); EXPAND_BFLOAT_L(b6, b6_0); EXPAND_BFLOAT_U(b6, b6_1); 
FMA(a1, b1_0, c1, c1); FMA(a1, b1_1, c2, c2); FMA(a1, b4_0, c3, c3); FMA(a1, b4_1, c4, c4); FMA(a2, b2_0, c1, c1); FMA(a2, b2_1, c2, c2); FMA(a2, b5_0, c3, c3); FMA(a2, b5_1, c4, c4); FMA(a3, b3_0, c1, c1); FMA(a3, b3_1, c2, c2); FMA(a3, b6_0, c3, c3); FMA(a3, b6_1, c4, c4); STORE(*out, c1); STORE(*out + kNumOperands, c2); STORE(*out + 2 * kNumOperands, c3); STORE(*out + 3 * kNumOperands, c4); *out += 4 * kNumOperands; *binp1 += 4 * kNumOperands; *binp2 += 4 * kNumOperands; *binp3 += 4 * kNumOperands; } ALWAYS_INLINE void MulAdd3Way128(const Packet a1, const Packet a2, const Packet a3, const bfloat16** inp1, const bfloat16** inp2, const bfloat16** inp3, float** out) { for (int k = 0; k < 128 / (8 * kNumOperands); ++k) { TwoMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); TwoMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); } } ALWAYS_INLINE void MulAdd(const Packet a, const float** inp, float** out) { const auto b = LOAD(*inp); *inp += kNumOperands; auto c = LOAD(*out); FMA(a, b, c, c); STORE(*out, c); *out += kNumOperands; } ALWAYS_INLINE void MulAdd3Way(const Packet a1, const Packet a2, const Packet a3, const float** inp1, const float** inp2, const float** inp3, float** out) { auto c = LOAD(*out); const auto b1 = LOAD(*inp1); *inp1 += kNumOperands; const auto b2 = LOAD(*inp2); *inp2 += kNumOperands; const auto b3 = LOAD(*inp3); *inp3 += kNumOperands; FMA(a1, b1, c, c); FMA(a2, b2, c, c); FMA(a3, b3, c, c); STORE(*out, c); *out += kNumOperands; } ALWAYS_INLINE void TwoMulAdd3Way(const Packet a1, const Packet a2, const Packet a3, const float** inp1, const float** inp2, const float** inp3, float** out) { auto c1 = LOAD(*out); const auto b1 = LOAD(*inp1); const auto b2 = LOAD(*inp2); const auto b3 = LOAD(*inp3); auto c2 = LOAD(*out + kNumOperands); const auto b4 = LOAD(*inp1 + kNumOperands); const auto b5 = LOAD(*inp2 + kNumOperands); const auto b6 = LOAD(*inp3 + kNumOperands); FMA(a1, b1, c1, c1); FMA(a1, b4, c2, c2); FMA(a2, b2, c1, c1); FMA(a2, b5, c2, c2); FMA(a3, b3, 
c1, c1); FMA(a3, b6, c2, c2); STORE(*out, c1); STORE(*out + kNumOperands, c2); *out += 2 * kNumOperands; *inp1 += 2 * kNumOperands; *inp2 += 2 * kNumOperands; *inp3 += 2 * kNumOperands; } ALWAYS_INLINE void FourMulAdd3Way(const Packet a1, const Packet a2, const Packet a3, const float** inp1, const float** inp2, const float** inp3, float** out) { TwoMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); TwoMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); } ALWAYS_INLINE void MulAdd3Way128(const Packet a1, const Packet a2, const Packet a3, const float** inp1, const float** inp2, const float** inp3, float** out) { if (kNumOperands == 8) { FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); FourMulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); } else { DCHECK_LE(4 * kNumOperands, 128); for (int i = 0; i < 128 / (4 * kNumOperands); ++i) { MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); MulAdd3Way(a1, a2, a3, inp1, inp2, inp3, out); } } } template <typename TL, typename TR, int Cols> inline void GEPP( const std::vector<SparseSlice<TL>*>& left_slices, const Eigen::TensorMap<Eigen::Tensor<const TR, 2, Eigen::RowMajor>, Eigen::Aligned>& right, const int num_cols, Matrix* output) { const int cols = (Cols == -1) ? num_cols : Cols; DCHECK_EQ(num_cols, cols); const int right_num_cols = right.dimension(1); const int output_num_cols = output->dimension(1); static const int kNumOperandsR = kNumOperands * sizeof(float) / sizeof(TR); const int cols_mod = cols % kNumOperandsR; int k_offset = 0; float* out_ptrs[M]; float* const out_start = &(*output)(0, 0); for (int j = 0; j < M; ++j) { out_ptrs[j] = out_start + output_num_cols * j; } for (const auto* left_slice : left_slices) { const auto& left = *left_slice; const auto* data3 = (!left.data3.empty()) ? 
&left.data3[0] : nullptr; const auto* data = (!left.data.empty()) ? &left.data[0] : nullptr; const int num_blocks = left.index3_offset.size(); int begin3 = 0; int begin = 0; for (int i = 0; i < num_blocks; ++i) { const TR* right_ptrs[K]; const auto* const right_start = &right(k_offset, 0); DCHECK_LT(k_offset, right.dimension(0)); for (int j = 0; j < K; ++j) { right_ptrs[j] = right_start + right_num_cols * j; } const int end3 = left.index3_offset[i]; int j = begin3; for (; j + 1 < end3; j += 2) { Packet l1, l2, l3, nl1, nl2, nl3; LoadSixScalars(&data3, &l1, &l2, &l3, &nl1, &nl2, &nl3); const auto& index = left.index3[j]; const auto& nindex = left.index3[j + 1]; float* out = out_ptrs[index.m]; float* nout = out_ptrs[nindex.m]; const auto* r1 = right_ptrs[index.k1]; const auto* r2 = right_ptrs[index.k2]; const auto* r3 = right_ptrs[index.k3]; const auto* nr1 = right_ptrs[nindex.k1]; const auto* nr2 = right_ptrs[nindex.k2]; const auto* nr3 = right_ptrs[nindex.k3]; if (cols == 128) { MulAdd3Way128(l1, l2, l3, &r1, &r2, &r3, &out); MulAdd3Way128(nl1, nl2, nl3, &nr1, &nr2, &nr3, &nout); } else { for (int n = 0; n < cols / kNumOperandsR; ++n) { MulAdd3Way(l1, l2, l3, &r1, &r2, &r3, &out); MulAdd3Way(nl1, nl2, nl3, &nr1, &nr2, &nr3, &nout); } const float sl1 = Eigen::internal::pfirst<Packet>(l1); const float sl2 = Eigen::internal::pfirst<Packet>(l2); const float sl3 = Eigen::internal::pfirst<Packet>(l3); const float nsl1 = Eigen::internal::pfirst<Packet>(nl1); const float nsl2 = Eigen::internal::pfirst<Packet>(nl2); const float nsl3 = Eigen::internal::pfirst<Packet>(nl3); for (int k = 0; k < cols_mod; ++k) { ScalarMulAdd3Way(sl1, sl2, sl3, &r1, &r2, &r3, &out); ScalarMulAdd3Way(nsl1, nsl2, nsl3, &nr1, &nr2, &nr3, &nout); } } } if (j < end3) { Packet l1, l2, l3; LoadThreeScalars(&data3, &l1, &l2, &l3); const auto& index = left.index3[j]; float* out = out_ptrs[index.m]; const auto* r1 = right_ptrs[index.k1]; const auto* r2 = right_ptrs[index.k2]; const auto* r3 = 
right_ptrs[index.k3]; if (cols == 128) { MulAdd3Way128(l1, l2, l3, &r1, &r2, &r3, &out); } else { for (int n = 0; n < cols / kNumOperandsR; ++n) { MulAdd3Way(l1, l2, l3, &r1, &r2, &r3, &out); } const float sl1 = Eigen::internal::pfirst<Packet>(l1); const float sl2 = Eigen::internal::pfirst<Packet>(l2); const float sl3 = Eigen::internal::pfirst<Packet>(l3); for (int k = 0; k < cols_mod; ++k) { ScalarMulAdd3Way(sl1, sl2, sl3, &r1, &r2, &r3, &out); } } } begin3 = end3; int end = left.index_offset[i]; j = begin; for (; j + 3 < end; j += 4) { Packet l, nl, n2l, n3l; LoadFourScalars(&data, &l, &nl, &n2l, &n3l); const auto& index = left.index[j]; const auto& nindex = left.index[j + 1]; const auto& n2index = left.index[j + 2]; const auto& n3index = left.index[j + 3]; const auto* r = right_ptrs[index.k]; const auto* nr = right_ptrs[nindex.k]; const auto* n2r = right_ptrs[n2index.k]; const auto* n3r = right_ptrs[n3index.k]; float* out = out_ptrs[index.m]; float* nout = out_ptrs[nindex.m]; float* n2out = out_ptrs[n2index.m]; float* n3out = out_ptrs[n3index.m]; for (int n = 0; n < cols / kNumOperandsR; ++n) { MulAdd(l, &r, &out); MulAdd(nl, &nr, &nout); MulAdd(n2l, &n2r, &n2out); MulAdd(n3l, &n3r, &n3out); } const float sl1 = Eigen::internal::pfirst<Packet>(l); const float sl2 = Eigen::internal::pfirst<Packet>(nl); const float sl3 = Eigen::internal::pfirst<Packet>(n2l); const float sl4 = Eigen::internal::pfirst<Packet>(n3l); for (int k = 0; k < cols_mod; ++k) { ScalarMulAdd(sl1, &r, &out); ScalarMulAdd(sl2, &nr, &nout); ScalarMulAdd(sl3, &n2r, &n2out); ScalarMulAdd(sl4, &n3r, &n3out); } } while (j < end) { Packet l; LoadSingleScalar(&data, &l); const auto& index = left.index[j]; const auto* r = right_ptrs[index.k]; float* out = out_ptrs[index.m]; for (int n = 0; n < cols / kNumOperandsR; ++n) { MulAdd(l, &r, &out); } const float sl = Eigen::internal::pfirst<Packet>(l); for (int k = 0; k < cols_mod; ++k) { ScalarMulAdd(sl, &r, &out); } j++; } k_offset += left.block_size; begin 
= end; } } } #undef LOAD #undef EXPAND_BFLOAT_L #undef EXPAND_BFLOAT_U #undef STORE #undef FMA } template <typename TL, typename TR> class SparseMatMul { using MatrixL = BasicMatrix<TL>; using MatrixR = BasicMatrix<TR>; using ConstMatrixMapL = BasicMatrixMap<const TL>; using ConstMatrixMapR = BasicMatrixMap<const TR>; using MatrixMapR = BasicMatrixMap<TR>; public: public: static inline void Compute(const ConstMatrixMapL& left, const ConstMatrixMapR& right, bool transpose_left, const DeviceBase::CpuWorkerThreads* thread_pool, bool transpose_output, MatrixMap* output); private: static inline void ComputeOutputBlock( const std::vector<SparseSlice<TL>*>& left, const ConstMatrixMapR& right, int num_cols, int output_row_offset, int output_col_offset, bool assign, bool transpose_output, MatrixMap* output); static inline std::unique_ptr<BlockingCounter> CreateSparseSlices( const ConstMatrixMapL& mat, bool transpose, int slice_num_rows, int slice_block_size, int slice_num_cols, std::vector<std::vector<SparseSlice<TL>*>>* mat_slices, const DeviceBase::CpuWorkerThreads* thread_pool); static inline std::unique_ptr<BlockingCounter> CreateDenseSlices( const ConstMatrixMapR& mat, int row_start, int num_rows, int col_start, int num_cols, const DeviceBase::CpuWorkerThreads* thread_pool, MatrixR* buffer, std::vector<ConstMatrixMapR*>* slices); static inline BlockingCounter* ShuffleMatrix( const ConstMatrixMapR& mat, int slice_row_start, int slice_num_rows, int slice_col_start, int slice_num_cols, const int N, const DeviceBase::CpuWorkerThreads* thread_pool, MatrixR* buffer); static inline void SliceMatrix(const MatrixR& mat, const int num_rows, const int num_slices, std::vector<ConstMatrixMapR*>* slices); static inline void ComputeBlockSizes(const ConstMatrixMapL& left, const ConstMatrixMapR& right, bool transpose_left, int num_threads, int* KR, int* NR, int* KL, int* JB, int* IB); SparseMatMul(const SparseMatMul&) = delete; void operator=(const SparseMatMul&) = delete; }; template 
<typename TL, typename TR, template <typename TL2, typename TR2> class DoMatMul> class SparseMatMulOp : public OpKernel { using MatrixR = BasicMatrix<TR>; using ConstMatrixMapR = BasicMatrixMap<const TR>; public: explicit SparseMatMulOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("a_is_sparse", &a_is_sparse_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("b_is_sparse", &b_is_sparse_)); } void Compute(OpKernelContext* ctx) override { const Tensor& a = ctx->input(0); const Tensor& b = ctx->input(1); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()), errors::InvalidArgument("a is not a matrix")); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()), errors::InvalidArgument("b is not a matrix")); const int m = transpose_a_ ? a.dim_size(1) : a.dim_size(0); const int k = transpose_a_ ? a.dim_size(0) : a.dim_size(1); const int n = transpose_b_ ? b.dim_size(0) : b.dim_size(1); const int k2 = transpose_b_ ? 
b.dim_size(1) : b.dim_size(0); OP_REQUIRES(ctx, k == k2, errors::InvalidArgument( "Matrix size incompatible: a: ", a.shape().DebugString(), ", b: ", b.shape().DebugString())); OP_REQUIRES(ctx, m >= 0 && n >= 0 && k >= 0, errors::InvalidArgument( "Matrix dimensions cannot be negative: a: ", a.shape().DebugString(), ", b: ", b.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({m, n}), &output)); if (m == 0 || n == 0) { return; } if (k == 0) { functor::SetZeroFunctor<CPUDevice, float> f; f(ctx->eigen_device<CPUDevice>(), output->flat<float>()); return; } auto out = output->matrix<float>(); std::unique_ptr<Tensor> a_float; std::unique_ptr<Tensor> b_float; if (!a_is_sparse_ && !b_is_sparse_) { auto left = &a; auto right = &b; if (std::is_same<TL, bfloat16>::value) { a_float.reset(new Tensor(DT_FLOAT, a.shape())); BFloat16ToFloat(a.flat<bfloat16>().data(), a_float->flat<float>().data(), a.NumElements()); left = a_float.get(); } if (std::is_same<TR, bfloat16>::value) { b_float.reset(new Tensor(DT_FLOAT, b.shape())); BFloat16ToFloat(b.flat<bfloat16>().data(), b_float->flat<float>().data(), b.NumElements()); right = b_float.get(); } Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair; dim_pair[0].first = transpose_a_ ? 0 : 1; dim_pair[0].second = transpose_b_ ? 
1 : 0; out.device(ctx->template eigen_device<CPUDevice>()) = left->matrix<float>().contract(right->matrix<float>(), dim_pair); return; } auto left = &a; auto right = &b; bool transpose_output = false; bool transpose_a = transpose_a_; bool transpose_b = transpose_b_; if (!a_is_sparse_) { std::swap(left, right); std::swap(transpose_a, transpose_b); transpose_a = !transpose_a; transpose_b = !transpose_b; transpose_output = !transpose_output; } std::unique_ptr<Tensor> right_tr; if (transpose_b) { OP_REQUIRES(ctx, right->dim_size(0) != 0, errors::InvalidArgument("b has an entry 0 in it's shape.")); OP_REQUIRES(ctx, right->dim_size(1) != 0, errors::InvalidArgument("b has an entry 0 in it's shape.")); right_tr.reset( new Tensor(right->dtype(), TensorShape({right->dim_size(1), right->dim_size(0)}))); const auto perm = dsizes_10(); if (transpose_output) { right_tr->matrix<TL>().device(ctx->template eigen_device<CPUDevice>()) = right->matrix<TL>().shuffle(perm); } else { right_tr->matrix<TR>().device(ctx->template eigen_device<CPUDevice>()) = right->matrix<TR>().shuffle(perm); } right = right_tr.get(); } if (transpose_output) { DoMatMul<TR, TL>::Compute(left->matrix<TR>(), right->matrix<TL>(), transpose_a, ctx->device()->tensorflow_cpu_worker_threads(), transpose_output, &out); } else { DoMatMul<TL, TR>::Compute(left->matrix<TL>(), right->matrix<TR>(), transpose_a, ctx->device()->tensorflow_cpu_worker_threads(), transpose_output, &out); } } private: bool transpose_a_; bool transpose_b_; bool a_is_sparse_; bool b_is_sparse_; SparseMatMulOp(const SparseMatMulOp&) = delete; void operator=(const SparseMatMulOp&) = delete; }; template <typename TL, typename TR> inline void SparseMatMul<TL, TR>::ComputeOutputBlock( const std::vector<SparseSlice<TL>*>& left, const typename SparseMatMul<TL, TR>::ConstMatrixMapR& right, int num_cols, int output_row_offset, int output_col_offset, bool assign, bool transpose_output, MatrixMap* output) { const auto perm = dsizes_10(); int num_rows = 
left[0]->num_rows; const int rhs_num_cols = right.dimension(1); DCHECK_LE(num_cols, rhs_num_cols); Matrix out(num_rows, rhs_num_cols); out.setZero(); if (num_cols == N) { GEPP<TL, TR, N>(left, right, num_cols, &out); } else { GEPP<TL, TR, -1>(left, right, num_cols, &out); } if (!assign) { const DSizes begin(output_row_offset, output_col_offset); const DSizes sizes(num_rows, num_cols); if (transpose_output) { if (num_cols == rhs_num_cols) { output->shuffle(perm).slice(begin, sizes) += out; } else { const auto zero = dsizes_00(); output->shuffle(perm).slice(begin, sizes) += out.slice(zero, sizes); } } else { if (num_cols == rhs_num_cols) { output->slice(begin, sizes) += out; } else { const auto zero = dsizes_00(); output->slice(begin, sizes) += out.slice(zero, sizes); } } } else { std::unique_ptr<Matrix> out_tr; if (transpose_output) { out_tr.reset(new Matrix(rhs_num_cols, num_rows)); *out_tr = out.shuffle(perm); std::swap(output_row_offset, output_col_offset); std::swap(num_rows, num_cols); } const Matrix& final_out = transpose_output ? *out_tr : out; for (int i = 0; i < num_rows; ++i) { memcpy(&(*output)(output_row_offset + i, output_col_offset), &final_out(i, 0), num_cols * sizeof(float)); } } } template <typename TL, typename TR> inline std::unique_ptr<BlockingCounter> SparseMatMul<TL, TR>::CreateSparseSlices( const typename SparseMatMul<TL, TR>::ConstMatrixMapL& mat, bool transpose, int slice_num_rows, int slice_block_size, int slice_num_cols, std::vector<std::vector<SparseSlice<TL>*>>* mat_slices, const DeviceBase::CpuWorkerThreads* thread_pool) { const int mat_num_rows = transpose ? mat.dimension(1) : mat.dimension(0); const int mat_num_cols = transpose ? 
mat.dimension(0) : mat.dimension(1); const int num_slices_dim0 = std::max(1, (mat_num_rows + slice_num_rows - 1) / slice_num_rows); const int num_slices_dim1 = std::max(1, (mat_num_cols + slice_num_cols - 1) / slice_num_cols); mat_slices->resize(num_slices_dim0); BlockingCounter* counter = new BlockingCounter(num_slices_dim0 * num_slices_dim1); auto work = [counter, transpose](SparseSlice<TL>* sparse_slice, SparseMatMul<TL, TR>::ConstMatrixMapL* slice, int col_offset) { if (transpose) { sparse_slice->template Initialize<true>(*slice, col_offset); } else { sparse_slice->template Initialize<false>(*slice, col_offset); } delete slice; counter->DecrementCount(); }; for (int i = 0; i < num_slices_dim0; ++i) { (*mat_slices)[i].resize(num_slices_dim1); int num_rows = std::min<int>(slice_num_rows, mat_num_rows - i * slice_num_rows); for (int j = 0; j < num_slices_dim1; ++j) { int num_cols = std::min<int>(slice_num_cols, mat_num_cols - j * slice_num_cols); SparseMatMul<TL, TR>::ConstMatrixMapL* slice = nullptr; if (transpose) { slice = new SparseMatMul<TL, TR>::ConstMatrixMapL( &mat(0, i * slice_num_rows), mat.dimensions()); } else { DSizes d(num_rows, mat_num_cols); slice = new SparseMatMul<TL, TR>::ConstMatrixMapL( &mat(i * slice_num_rows, 0), d); } auto* sparse_slice = new SparseSlice<TL>(num_rows, num_cols, slice_block_size); (*mat_slices)[i][j] = sparse_slice; thread_pool->workers->Schedule( [=]() { work(sparse_slice, slice, slice_num_cols * j); }); } } return std::unique_ptr<BlockingCounter>(counter); } #define LOAD(x) Eigen::internal::ploadu<Packet>((x)); #define INTERLEAVE(x) Eigen::internal::pinterleave4x64<Packet>(x); #define STORE(x, y) Eigen::internal::pstoreu<float>(x, y); template <int NUM_ELEM = -1> ALWAYS_INLINE void CopyAndMayBeInterleaveBfloat16(void* bdst, const void* bsrc, int num_elements) { DCHECK_GE(kNumOperands, 8); static const int kStep = kNumOperands * sizeof(float) / sizeof(bfloat16); const int num = (NUM_ELEM == -1) ? 
num_elements : NUM_ELEM; DCHECK_EQ(num, num_elements); const float* src = reinterpret_cast<const float*>(bsrc); float* dst = reinterpret_cast<float*>(bdst); for (int index = 0; index + kStep <= num; index += kStep) { auto in = LOAD(src); auto tmp = INTERLEAVE(in); STORE(dst, tmp); src += kNumOperands; dst += kNumOperands; } if (num % kStep != 0) { memcpy(reinterpret_cast<void*>(dst), reinterpret_cast<const void*>(src), (num % kStep) * sizeof(bfloat16)); } } template <typename T> ALWAYS_INLINE void CopyAndMayBeInterleave(void* dst, const void* src, int num_elements) { if (std::is_same<T, float>::value || kNumOperands < 8) { memcpy(dst, src, num_elements * sizeof(T)); } else if (std::is_same<T, bfloat16>::value) { if (num_elements == N) { CopyAndMayBeInterleaveBfloat16<N>(dst, src, num_elements); } else { CopyAndMayBeInterleaveBfloat16<-1>(dst, src, num_elements); } } else { LOG(FATAL) << "Unsupported type"; } } #undef LOAD #undef Interleave #undef Store template <typename TL, typename TR> inline BlockingCounter* SparseMatMul<TL, TR>::ShuffleMatrix( const typename SparseMatMul<TL, TR>::ConstMatrixMapR& mat, int slice_row_start, int slice_num_rows, int slice_col_start, int slice_num_cols, const int N, const DeviceBase::CpuWorkerThreads* thread_pool, MatrixR* buffer) { DCHECK_EQ(N % 2, 0); DCHECK_LE(kNumOperands * sizeof(float) / sizeof(TR), N); int num_threads = std::min(thread_pool->num_threads, 8); BlockingCounter* counter = new BlockingCounter(num_threads); DCHECK_EQ(N, buffer->dimension(1)); auto shuffle_work = [&mat, slice_row_start, slice_num_rows, slice_col_start, slice_num_cols, N, buffer, counter](int s, int e) { const int row_start = s % slice_num_rows + slice_row_start; const int col_start = s / slice_num_rows * N + slice_col_start; auto* out_start = &(*buffer)(s, 0); const auto* input_start = &mat(row_start, col_start); const auto* input_end = &mat(slice_row_start + slice_num_rows - 1, slice_col_start + slice_num_cols - 1); const int mat_num_cols = 
mat.dimension(1); const int row_slice_size = slice_num_rows * mat_num_cols; const int aligned_end = slice_num_cols / N * slice_num_rows; const int e1 = std::min(e, aligned_end); while (s < e1) { CopyAndMayBeInterleave<TR>(out_start, input_start, N); out_start += N; input_start += mat_num_cols; if (input_start > input_end) { input_start = input_start - row_slice_size + N; } ++s; } int s1 = std::max(s, aligned_end); const int copy_num_cols = slice_num_cols % N; while (s1 < e) { CopyAndMayBeInterleave<TR>(out_start, input_start, copy_num_cols); out_start += N; input_start += mat_num_cols; ++s1; } if (counter) counter->DecrementCount(); }; int start = 0; int end = 0; int num_out_rows = (slice_num_cols + N - 1) / N * slice_num_rows; DCHECK_LE(num_out_rows, buffer->dimension(0)); for (int i = std::max(1, num_threads); i > 0; --i) { end = start + num_out_rows / i; thread_pool->workers->Schedule([=]() { shuffle_work(start, end); }); num_out_rows -= (end - start); start = end; } return counter; } template <typename TL, typename TR> inline void SparseMatMul<TL, TR>::SliceMatrix( const MatrixR& mat, const int num_rows, const int num_slices, std::vector<typename SparseMatMul<TL, TR>::ConstMatrixMapR*>* slices) { slices->resize(num_slices); DSizes d(num_rows, mat.dimension(1)); DCHECK_LE(num_rows * num_slices, mat.dimension(0)); for (int i = 0; i < num_slices; ++i) { (*slices)[i] = new ConstMatrixMapR(&mat(i * num_rows, 0), d); } } template <typename TL, typename TR> inline std::unique_ptr<BlockingCounter> SparseMatMul<TL, TR>::CreateDenseSlices( const typename SparseMatMul<TL, TR>::ConstMatrixMapR& mat, int row_start, int num_rows, int col_start, int num_cols, const DeviceBase::CpuWorkerThreads* thread_pool, MatrixR* buffer, std::vector<typename SparseMatMul<TL, TR>::ConstMatrixMapR*>* slices) { std::unique_ptr<BlockingCounter> shuffle_counter(ShuffleMatrix( mat, row_start, num_rows, col_start, num_cols, N, thread_pool, buffer)); const int num_slices = (num_cols + N - 1) / N; 
SliceMatrix(*buffer, num_rows, num_slices, slices); return shuffle_counter; } template <typename TL, typename TR> inline void SparseMatMul<TL, TR>::ComputeBlockSizes( const typename SparseMatMul<TL, TR>::ConstMatrixMapL& left, const typename SparseMatMul<TL, TR>::ConstMatrixMapR& right, bool transpose_left, int num_threads, int* KR, int* NR, int* KL, int* JB, int* IB) { const int est_num_cores = std::max(1, (num_threads + 1) / 2); const int mem = est_num_cores * 128 * 1024; *KR = std::min(static_cast<int>(right.dimension(0)), mem / 256); *NR = right.dimension(1); if (*KR * *NR > mem) { *KR = std::min<int>(*KR, 4096); } *KR = std::max(1, *KR / K) * K; *NR = std::max(1, *NR / 256) * 256; if (*KR * *NR > mem) { *NR = mem / *KR; } *NR = std::max(1, *NR / 256) * 256; const int left_dim0 = transpose_left ? left.dimension(1) : left.dimension(0); const int left_dim1 = transpose_left ? left.dimension(0) : left.dimension(1); for (*KL = 1024; *KL > K; *KL /= 2) { if (*KR % *KL == 0 && std::max<int>(1, left_dim0 / 64) * (left_dim1 / *KL) > est_num_cores) { break; } } DCHECK_EQ(*KL % K, 0); DCHECK_GE(*KR, *KL); if (*KR < right.dimension(0)) { CHECK_EQ(*KR % *KL, 0); } *JB = std::max(1, static_cast<int>(sqrt(num_threads) / 2.0)); *IB = 8 * *JB; DCHECK_EQ(N * sizeof(float) % 64, size_t{0}); } template <typename TL, typename TR> inline void SparseMatMul<TL, TR>::Compute( const typename SparseMatMul<TL, TR>::ConstMatrixMapL& left, const typename SparseMatMul<TL, TR>::ConstMatrixMapR& right, bool transpose_left, const DeviceBase::CpuWorkerThreads* thread_pool, bool transpose_output, MatrixMap* output) { const int num_threads = thread_pool->num_threads; int KR, NR, KL, JB, IB; ComputeBlockSizes(left, right, transpose_left, num_threads, &KR, &NR, &KL, &JB, &IB); std::vector<std::vector<SparseSlice<TL>*>> left_slices; std::unique_ptr<BlockingCounter> sparse_slice_counter = CreateSparseSlices(ConstMatrixMapL(left.data(), left.dimensions()), transpose_left, M, K, KL, &left_slices, 
thread_pool); const int num_left_slices = left_slices.size(); const int right_dim0 = right.dimension(0); const int right_dim1 = right.dimension(1); const int buffer_num_rows = std::min(KR, right_dim0) * ((std::min(NR, right_dim1) + N - 1) / N); MatrixR buffer(buffer_num_rows, N); std::vector<ConstMatrixMapR*> right_slices; std::vector<SparseSlice<TL>*> block_left_slices; std::vector<std::function<void(void)>> tasks; const int num_k_blocks = (right_dim0 + KR - 1) / KR; const int num_n_blocks = (right_dim1 + NR - 1) / NR; std::unique_ptr<BlockingCounter> dense_slice_counter; for (int nb = 0; nb < num_n_blocks; ++nb) { const int right_num_cols = std::min(NR, static_cast<int>(right_dim1 - NR * nb)); for (int kb = 0; kb < num_k_blocks; ++kb) { const int right_num_rows = std::min(KR, static_cast<int>(right_dim0 - KR * kb)); dense_slice_counter = CreateDenseSlices( right, kb * KR, right_num_rows, nb * NR, right_num_cols, thread_pool, &buffer, &right_slices); const int num_right_slices = right_slices.size(); tasks.reserve(num_left_slices * num_right_slices); for (int j_outer = 0; j_outer < num_right_slices; j_outer += JB) { for (int i_outer = 0; i_outer < num_left_slices; i_outer += IB) { for (int j_inner = j_outer; j_inner < std::min(num_right_slices, j_outer + JB); ++j_inner) { const int num_cols = std::min(N, right_num_cols - N * j_inner); for (int i_inner = i_outer; i_inner < std::min(num_left_slices, i_outer + IB); ++i_inner) { block_left_slices.clear(); int begin = kb * KR / KL; int end = std::min<int>((kb + 1) * KR / KL, (right.dimension(0) + KL - 1) / KL); DCHECK_LT(begin, end); block_left_slices.insert(block_left_slices.begin(), left_slices[i_inner].begin() + begin, left_slices[i_inner].begin() + end); tasks.push_back(std::bind( &ComputeOutputBlock, block_left_slices, std::ref(*right_slices[j_inner]), num_cols, M * i_inner, N * j_inner + nb * NR, kb == 0, transpose_output, output)); } } } } if (sparse_slice_counter) { sparse_slice_counter->Wait(); 
sparse_slice_counter.reset(nullptr); } if (dense_slice_counter) { dense_slice_counter->Wait(); dense_slice_counter.reset(nullptr); } BlockingCounter bc(tasks.size()); for (const auto& t : tasks) { thread_pool->workers->Schedule([&bc, &t]() { t(); bc.DecrementCount(); }); } bc.Wait(); tasks.clear(); for (auto& temp : right_slices) { delete temp; } right_slices.clear(); } } for (auto& left_slice : left_slices) { for (auto& temp : left_slice) { delete temp; } left_slice.clear(); } } #define REGISTER_SPARSE_MATMUL(TA, TB) \ REGISTER_KERNEL_BUILDER(Name("SparseMatMul") \ .Device(DEVICE_CPU) \ .TypeConstraint<TA>("Ta") \ .TypeConstraint<TB>("Tb"), \ SparseMatMulOp<TA, TB, SparseMatMul>); REGISTER_SPARSE_MATMUL(float, float); REGISTER_SPARSE_MATMUL(bfloat16, bfloat16); REGISTER_SPARSE_MATMUL(float, bfloat16); REGISTER_SPARSE_MATMUL(bfloat16, float); #undef REGISTER_SPARSE_MATMUL }
#include "tensorflow/core/kernels/sparse_matmul_op.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/bfloat16.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { random::PhiloxRandom philox(1, 1); random::SimplePhilox rnd(&philox); using Eigen::operator==; template <typename T> void Sparsify(Tensor* t, float sparsity) { const int64_t N = t->NumElements(); CHECK_LE(sparsity, 1); auto flat = t->flat<T>(); if (sparsity == 1) { flat.setZero(); return; } static const uint32 K = 10000; for (int64_t i = 0; i < N; ++i) { if (rnd.Uniform(K) < sparsity * K) { flat(i) = T(0); } else if (flat(i) == T(0)) { flat(i) = T(1); } } } Node* SparseMatMulNode(Graph* g, Node* in0, Node* in1, bool transpose_a, bool transpose_b, bool a_sparse, bool b_sparse) { Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseMatMul") .Input(in0) .Input(in1) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Attr("a_is_sparse", a_sparse) .Attr("b_is_sparse", b_sparse) .Finalize(g, &ret)); return ret; } template <typename TA, typename TB> static Graph* SparseMatMulHelper(Graph* g, int m, int n, int d, float sparsity_a, float sparsity_b, bool transpose_a, bool transpose_b) { bool a_sparse = (sparsity_a > 0); bool b_sparse = (sparsity_b > 0); auto left_shape = transpose_a ? TensorShape({d, m}) : TensorShape({m, d}); Tensor left(DataTypeToEnum<TA>::value, left_shape); left.flat<TA>().setRandom(); Sparsify<TA>(&left, sparsity_a); auto right_shape = transpose_b ? 
TensorShape({n, d}) : TensorShape({d, n}); Tensor right(DataTypeToEnum<TB>::value, right_shape); right.flat<TB>().setRandom(); Sparsify<TB>(&right, sparsity_b); SparseMatMulNode(g, test::graph::Constant(g, left), test::graph::Constant(g, right), transpose_a, transpose_b, a_sparse, b_sparse); return g; } template <typename TA, typename TB> static Graph* SparseMatMul(int m, int n, int d, float sparsity_a, float sparsity_b, bool transpose_a, bool transpose_b) { Graph* g = new Graph(OpRegistry::Global()); return SparseMatMulHelper<TA, TB>(g, m, n, d, sparsity_a, sparsity_b, transpose_a, transpose_b); } static Graph* ReplicatedSparseMatMul(int m, int n, int d, float sparsity_1, float sparsity_2, int copies) { Graph* g = new Graph(OpRegistry::Global()); for (int i = 0; i < copies; ++i) { SparseMatMulHelper<float, float>(g, m, n, d, sparsity_1, sparsity_2, false, false); } return g; } #define BM_SPARSE(M, K, N, S1, S2, TRA, TRB, TA, TB) \ static void \ BM_Sparse##_##M##_##K##_##N##_##S1##_##S2##_##TRA##_##TRB##_##TA##_##TB( \ ::testing::benchmark::State& state) { \ auto label = strings::Printf("tr_a: %d tr_b: %d sp_a: %0.2f sp_b: %0.2f", \ TRA, TRB, S1 / 100.0, S2 / 100.0); \ state.SetLabel(label); \ auto g = SparseMatMul<TA, TB>(M, N, K, S1 / 100.0, S2 / 100.0, TRA, TRB); \ test::Benchmark("cpu", g, false).Run(state); \ } \ BENCHMARK( \ BM_Sparse##_##M##_##K##_##N##_##S1##_##S2##_##TRA##_##TRB##_##TA##_##TB) \ ->UseRealTime(); #define BM_SPARSE_REPLICATED(M, K, N, S1, S2, Copies) \ static void BM_Sparse_replicated##_##M##_##K##_##N##_##S1##_##S2##_##Copies( \ ::testing::benchmark::State& state) { \ auto label = strings::Printf("copies: %d sp_a: %0.2f sp_b: %0.2f", \ (Copies), S1 / 100.0, S2 / 100.0); \ state.SetLabel(label); \ auto g = \ ReplicatedSparseMatMul(M, N, K, S1 / 100.0, S2 / 100.0, (Copies)); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(state.iterations() * M * K * N * Copies * 2); \ } \ 
BENCHMARK(BM_Sparse_replicated##_##M##_##K##_##N##_##S1##_##S2##_##Copies) \ ->UseRealTime(); #define BM_SPARSE_FLOAT(M, K, N, S1, S2, TRA, TRB) \ BM_SPARSE(M, K, N, S1, S2, TRA, TRB, float, float) #define BM_SPARSE_BFLOAT16(M, K, N, S1, S2, TRA, TRB) \ BM_SPARSE(M, K, N, S1, S2, TRA, TRB, bfloat16, bfloat16) #define BM_SPARSE_FLOAT_BFLOAT16(M, K, N, S1, S2, TRA, TRB) \ BM_SPARSE(M, K, N, S1, S2, TRA, TRB, float, bfloat16) #define BM_SPARSE_BFLOAT16_FLOAT(M, K, N, S1, S2, TRA, TRB) \ BM_SPARSE(M, K, N, S1, S2, TRA, TRB, bfloat16, float) BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 0, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 1, 0, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 50, 0, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 99, 0, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 50, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, false, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, true, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, false, true); BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, true, true); BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, true, false); BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, false, true); BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, true, true); BM_SPARSE_FLOAT(1024, 1024, 1024, 0, 0, false, false); BM_SPARSE_FLOAT(1024, 1024, 1024, 1, 0, false, false); BM_SPARSE_FLOAT(1024, 1024, 1024, 85, 0, false, false); BM_SPARSE_FLOAT(256, 256, 256, 1, 0, false, false); BM_SPARSE_FLOAT(512, 512, 512, 1, 0, false, false); BM_SPARSE_FLOAT(2560, 400, 1024, 85, 0, false, false); BM_SPARSE_FLOAT(2560, 400, 1024, 85, 0, true, false); BM_SPARSE_FLOAT(400, 800, 2560, 85, 0, false, false); BM_SPARSE_FLOAT(400, 2560, 1024, 85, 0, false, false); BM_SPARSE_FLOAT(400, 1024, 256, 85, 0, false, false); BM_SPARSE_FLOAT(400, 256, 1, 85, 0, false, false); BM_SPARSE_REPLICATED(400, 800, 2560, 85, 0, 6); BM_SPARSE_REPLICATED(400, 2560, 1024, 85, 0, 6); BM_SPARSE_REPLICATED(400, 1024, 256, 85, 0, 6); 
BM_SPARSE_REPLICATED(400, 256, 1, 85, 0, 6); BM_SPARSE_FLOAT(2048, 1792, 1024, 85, 0, false, false); BM_SPARSE_FLOAT(2048, 1024, 768, 85, 0, false, false); BM_SPARSE_FLOAT(2048, 768, 512, 85, 0, false, false); BM_SPARSE_FLOAT(2048, 512, 256, 85, 0, false, false); BM_SPARSE_FLOAT(2049, 1792, 1024, 85, 0, false, false); BM_SPARSE_FLOAT(2049, 1024, 768, 85, 0, false, false); BM_SPARSE_FLOAT(2049, 768, 512, 85, 0, false, false); BM_SPARSE_FLOAT(2049, 512, 256, 85, 0, false, false); BM_SPARSE_REPLICATED(2048, 1792, 1024, 85, 0, 6); BM_SPARSE_REPLICATED(2048, 1024, 768, 85, 0, 6); BM_SPARSE_REPLICATED(2048, 768, 512, 85, 0, 6); BM_SPARSE_REPLICATED(2048, 512, 256, 85, 0, 6); BM_SPARSE_BFLOAT16(2048, 2048, 2048, 0, 0, false, false); BM_SPARSE_BFLOAT16(2048, 2048, 2048, 1, 0, false, false); BM_SPARSE_BFLOAT16(2048, 2048, 2048, 85, 0, false, false); BM_SPARSE_BFLOAT16(2048, 2048, 2048, 99, 0, false, false); BM_SPARSE_BFLOAT16_FLOAT(2048, 2048, 2048, 85, 0, false, false); BM_SPARSE_BFLOAT16_FLOAT(2048, 2048, 2048, 99, 0, false, false); BM_SPARSE_FLOAT_BFLOAT16(2048, 2048, 2048, 85, 0, false, false); BM_SPARSE_FLOAT_BFLOAT16(2048, 2048, 2048, 99, 0, false, false); static Graph* MultiSparseMatMul(int m, int n, int d, float sparsity_1, float sparsity_2, int copies) { Graph* g = new Graph(OpRegistry::Global()); for (int i = 0; i < copies; ++i) { SparseMatMulHelper<float, float>(g, d, n, m, sparsity_1, sparsity_2, true, false); SparseMatMulHelper<float, float>(g, m, d, n, sparsity_2, 0, false, true); } return g; } #define BM_SPARSE_MULTI(M, K, N, S1, S2, Copies) \ static void BM_Sparse_Multi##_##M##_##K##_##N##_##S1##_##S2##_##Copies(::testing::benchmark::State& state) { \ auto label = strings::Printf("%d_%d_%d_%d_%0.2f_%0.2f", M, K, N, Copies, \ S1 / 100.0, S2 / 100.0); \ state.SetLabel(label); \ auto g = MultiSparseMatMul(M, N, K, S1 / 100.0, S2 / 100.0, Copies); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(state.iterations() * M * K * N * 2 * 2 * 
Copies); \ } \ BENCHMARK(BM_Sparse_Multi##_##M##_##K##_##N##_##S1##_##S2##_##Copies) \ ->UseRealTime(); BM_SPARSE_MULTI(1024, 2140, 4096, 0, 82, 1); BM_SPARSE_MULTI(1024, 4096, 2048, 83, 83, 1); BM_SPARSE_MULTI(400, 800, 2560, 85, 85, 1); BM_SPARSE_MULTI(400, 2560, 1024, 85, 85, 1); BM_SPARSE_MULTI(400, 1024, 256, 85, 85, 1); BM_SPARSE_MULTI(400, 256, 1, 85, 85, 1); BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 1); BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 1); BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 1); BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 1); BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 3); BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 3); BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 3); BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 3); BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 6); BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 6); BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 6); BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 6); } namespace Eigen { namespace internal { class SparseMatmulOpTest : public ::testing::Test { protected: SparseMatmulOpTest() : PacketSize(Eigen::internal::packet_traits<float>::size) { typedef typename NumTraits<float>::Real RealFloat; for (int i = 0; i < kMaxPacketSize; ++i) { data1[i] = internal::random<float>() / RealFloat(PacketSize); data2[i] = internal::random<float>() / RealFloat(PacketSize); data3[i] = internal::random<float>() / RealFloat(PacketSize); } for (int i = kMaxPacketSize; i < kMaxPacketSize * 2; ++i) { data3[i] = internal::random<float>() / RealFloat(PacketSize); } for (int i = 0; i < kMaxPacketSize * 2; ++i) { uint16_t* data3_p = reinterpret_cast<uint16_t*>(&data3[i]); uint16_t* data3_bfloat16_p = reinterpret_cast<uint16_t*>(data3_bfloat16) + i; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ data3_p[1] = 0; data3_bfloat16_p[0] = data3_p[0]; #else data3_p[0] = 0; data3_bfloat16_p[0] = data3_p[1]; #endif } } bool areApprox(const float* a, const float* b, int size) { for (int i = 0; i < size; ++i) { if (a[i] != b[i] && !internal::isApprox(a[i], b[i])) { auto ma = 
Map<const Matrix<float, 1, Dynamic> >(a, size); auto mb = Map<const Matrix<float, 1, Dynamic> >(b, size); std::cout << "[" << ma << "]" << " != [" << mb << "], differences: [" << (mb - ma) << "]\n"; return false; } } return true; } #ifdef EIGEN_VECTORIZE_AVX512 static const int kMaxPacketSize = 16; #elif defined EIGEN_VECTORIZE_AVX || defined EIGEN_VECTORIZE_AVX2 static const int kMaxPacketSize = 8; #else static constexpr int kMaxPacketSize = 4; #endif typedef typename Eigen::internal::packet_traits<float>::type Packet; const int PacketSize; EIGEN_ALIGN_MAX float data1[kMaxPacketSize]; EIGEN_ALIGN_MAX float data2[kMaxPacketSize]; EIGEN_ALIGN_MAX float data3[kMaxPacketSize * 2]; EIGEN_ALIGN_MAX float data3_bfloat16[kMaxPacketSize]; EIGEN_ALIGN_MAX float ref[kMaxPacketSize]; public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW }; TEST_F(SparseMatmulOpTest, BroadcastPacketTest) { for (int i = 0; i < PacketSize; ++i) ref[i] = data1[0]; internal::pstoreu(data2, internal::pbroadcast_first<Packet>( internal::ploadu<Packet>(data1))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); if (PacketSize > 1) { for (int i = 0; i < PacketSize; ++i) ref[i] = data1[1]; internal::pstoreu(data2, internal::pbroadcast_second<Packet>( internal::ploadu<Packet>(data1))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); if (PacketSize > 2) { for (int i = 0; i < PacketSize; ++i) ref[i] = data1[2]; internal::pstoreu(data2, internal::pbroadcast_third<Packet>( internal::ploadu<Packet>(data1))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); if (PacketSize > 3) { for (int i = 0; i < PacketSize; ++i) ref[i] = data1[3]; internal::pstoreu(data2, internal::pbroadcast_fourth<Packet>( internal::ploadu<Packet>(data1))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); } } } } TEST_F(SparseMatmulOpTest, InterleavePacketTest) { if (PacketSize == 8) { for (int i = 0; i < PacketSize / 4; ++i) ref[i] = data1[i]; for (int i = PacketSize / 4; i < PacketSize / 2; ++i) ref[i] = data1[i + PacketSize / 4]; for (int i = 
PacketSize / 2; i < 3 * PacketSize / 4; ++i) ref[i] = data1[i - PacketSize / 4]; for (int i = 3 * PacketSize / 4; i < PacketSize; ++i) ref[i] = data1[i]; } else { for (int i = 0; i < PacketSize; ++i) ref[i] = data1[i]; } internal::pstoreu(data2, internal::pinterleave4x64<Packet>( internal::ploadu<Packet>(data1))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); } TEST_F(SparseMatmulOpTest, Bfloat16ExpandTest) { if (PacketSize == 8) { for (int i = 0; i < PacketSize / 2; ++i) { ref[i] = data3[i]; } for (int i = 0; i < PacketSize / 2; ++i) { ref[i + PacketSize / 2] = data3[i + PacketSize]; } } else { for (int i = 0; i < PacketSize; ++i) { ref[i] = data3[i]; } } internal::pstoreu(data2, internal::pexpand_bf16_l<Packet>( internal::ploadu<Packet>(data3_bfloat16))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); if (PacketSize == 8) { for (int i = 0; i < PacketSize / 2; ++i) { ref[i] = data3[i + PacketSize / 2]; } for (int i = 0; i < PacketSize / 2; ++i) { ref[i + PacketSize / 2] = data3[i + 3 * PacketSize / 2]; } } else { for (int i = 0; i < PacketSize; ++i) { ref[i] = data3[i + PacketSize]; } } internal::pstoreu(data2, internal::pexpand_bf16_u<Packet>( internal::ploadu<Packet>(data3_bfloat16))); ASSERT_TRUE(areApprox(ref, data2, PacketSize)); } TEST_F(SparseMatmulOpTest, Bfloat16LoadTest) { if (PacketSize >= 4) { for (int i = 0; i < 4; ++i) ref[i] = data3[i]; internal::pstoreu(data2, internal::pload4bf16<Packet>(data3_bfloat16)); ASSERT_TRUE(areApprox(ref, data2, 4)); internal::pstoreu(data2, internal::pload2bf16<Packet>(data3_bfloat16)); ASSERT_TRUE(areApprox(ref, data2, 2)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_matmul_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_matmul_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1c8ca7d3-f69b-4413-a156-dc617e13bd25
cpp
tensorflow/tensorflow
mfcc_dct
tensorflow/lite/kernels/internal/mfcc_dct.cc
tensorflow/core/kernels/mfcc_dct_test.cc
#include "tensorflow/lite/kernels/internal/mfcc_dct.h" #include <math.h> namespace tflite { namespace internal { MfccDct::MfccDct() : initialized_(false) {} bool MfccDct::Initialize(int input_length, int coefficient_count) { coefficient_count_ = coefficient_count; input_length_ = input_length; if (coefficient_count_ < 1) { return false; } if (input_length < 1) { return false; } if (coefficient_count_ > input_length_) { return false; } cosines_.resize(coefficient_count_); double fnorm = sqrt(2.0 / input_length_); const double pi = atan(1.0) * 4.0; double arg = pi / input_length_; for (int i = 0; i < coefficient_count_; ++i) { cosines_[i].resize(input_length_); for (int j = 0; j < input_length_; ++j) { cosines_[i][j] = fnorm * cos(i * arg * (j + 0.5)); } } initialized_ = true; return true; } void MfccDct::Compute(const std::vector<double> &input, std::vector<double> *output) const { if (!initialized_) { return; } output->resize(coefficient_count_); int length = input.size(); if (length > input_length_) { length = input_length_; } for (int i = 0; i < coefficient_count_; ++i) { double sum = 0.0; for (int j = 0; j < length; ++j) { sum += cosines_[i][j] * input[j]; } (*output)[i] = sum; } } } }
#include "tensorflow/core/kernels/mfcc_dct.h" #include <vector> #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { TEST(MfccDctTest, AgreesWithMatlab) { MfccDct dct; std::vector<double> input = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; const int kCoefficientCount = 6; ASSERT_TRUE(dct.Initialize(input.size(), kCoefficientCount)); std::vector<double> output; dct.Compute(input, &output); std::vector<double> expected = {12.1243556530, -4.1625617959, 0.0, -0.4082482905, 0.0, -0.0800788912}; ASSERT_EQ(output.size(), kCoefficientCount); for (int i = 0; i < kCoefficientCount; ++i) { EXPECT_NEAR(output[i], expected[i], 1e-10); } } TEST(MfccDctTest, InitializeFailsOnInvalidInput) { MfccDct dct1; EXPECT_FALSE(dct1.Initialize(-50, 1)); EXPECT_FALSE(dct1.Initialize(10, -4)); EXPECT_FALSE(dct1.Initialize(-1, -1)); EXPECT_FALSE(dct1.Initialize(20, 21)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc_dct.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_dct_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
dc61a88a-ab09-4c03-b39a-9428d13af170
cpp
tensorflow/tensorflow
split_op
tensorflow/compiler/tf2xla/kernels/split_op.cc
tensorflow/core/kernels/split_op_test.cc
#include <vector> #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/shape.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { class SplitOp : public XlaOpKernel { public: explicit SplitOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const int32_t num_split = num_outputs(); const TensorShape split_dim_shape = ctx->InputShape("split_dim"); const TensorShape input_shape = ctx->InputShape(1); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(split_dim_shape), errors::InvalidArgument("split_dim must be a scalar but has rank ", split_dim_shape.dims())); int64_t split_dim_orig; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(0, &split_dim_orig)); int32_t split_dim = split_dim_orig < 0 ? 
split_dim_orig + input_shape.dims() : split_dim_orig; OP_REQUIRES(ctx, 0 <= split_dim && split_dim < input_shape.dims(), errors::InvalidArgument("-input rank(-", input_shape.dims(), ") <= split_dim < input rank (", input_shape.dims(), "), but got ", split_dim_orig)); OP_REQUIRES( ctx, num_split > 0, errors::InvalidArgument( "Number of ways to split should be > 0, but got ", num_split)); xla::XlaBuilder* builder = ctx->builder(); xla::XlaOp input = ctx->Input(1); auto shape_or = builder->GetShape(input); OP_REQUIRES_OK(ctx, shape_or.status()); xla::Shape xla_shape = shape_or.value(); OP_REQUIRES( ctx, !xla_shape.is_dynamic_dimension(split_dim), errors::InvalidArgument( "Split op doesn't support split for the dynamic dimension")); OP_REQUIRES( ctx, xla_shape.dimensions(split_dim) % num_split == 0, errors::InvalidArgument( "Number of ways to split should evenly divide the split " "dimension, but got split_dim ", split_dim_orig, " (size = ", input_shape.dim_size(split_dim), ") ", "and num_split ", num_split)); const int32_t slice_size = input_shape.dim_size(split_dim) / num_split; std::vector<int64_t> begin(input_shape.dims(), 0); std::vector<int64_t> limits(input_shape.dims()); std::vector<int64_t> strides(input_shape.dims(), 1); for (int i = 0; i < input_shape.dims(); ++i) { int64_t dim = input_shape.dim_size(i); limits[i] = dim; } for (int i = 0; i < num_split; ++i) { begin[split_dim] = i * slice_size; limits[split_dim] = (i + 1) * slice_size; ctx->SetOutput(i, xla::Slice(input, begin, limits, strides)); } } }; REGISTER_XLA_OP(Name("Split").CompileTimeConstantInput("split_dim"), SplitOp); class SplitVOp : public XlaOpKernel { public: explicit SplitVOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const int32_t num_split = num_outputs(); const TensorShape input_shape = ctx->InputShape(0); const TensorShape index_shape = ctx->InputShape(2); OP_REQUIRES(ctx, index_shape.num_elements() == 1, errors::InvalidArgument( 
"split_dim_tensor must have exactly one element.")); int64_t split_dim_orig; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(2, &split_dim_orig)); int64_t split_dim = split_dim_orig < 0 ? split_dim_orig + input_shape.dims() : split_dim_orig; OP_REQUIRES(ctx, 0 <= split_dim && split_dim < input_shape.dims(), errors::InvalidArgument("-input rank(-", input_shape.dims(), ") <= split_dim < input rank (", input_shape.dims(), "), but got ", split_dim_orig)); xla::XlaOp input = ctx->Input(0); OP_REQUIRES(ctx, input_shape.dims() > 0, errors::InvalidArgument("Can't split a 0 dimensional input")); OP_REQUIRES( ctx, num_split > 0, errors::InvalidArgument( "Number of ways to split should be > 0, but got ", num_split)); int total_split_size = 0; int neg_one_dim = -1; const TensorShape split_size_shape = ctx->InputShape(1); OP_REQUIRES(ctx, split_size_shape.dims() == 1 && split_size_shape.num_elements() == num_split, errors::InvalidArgument( "shape of tensor describing " " the output must have dimension 1 and the same " " number of elements as the output. Got ", split_size_shape.dims(), "-D and ", split_size_shape.num_elements(), " elements")); std::vector<int64_t> split_sizes; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &split_sizes)); for (int i = 0; i < num_split; ++i) { int64_t slice_size = split_sizes[i]; OP_REQUIRES(ctx, slice_size >= -1, errors::InvalidArgument("Split size at index ", i, " must be >= -1, Got: ", slice_size)); if (slice_size == -1) { OP_REQUIRES( ctx, neg_one_dim == -1, errors::InvalidArgument("Only one dimensions can have a value of" "-1. 
Second one found at dimension ", i)); neg_one_dim = i; } else { total_split_size += slice_size; } } xla::XlaBuilder* builder = ctx->builder(); auto shape_or = builder->GetShape(input); OP_REQUIRES_OK(ctx, shape_or.status()); xla::Shape xla_shape = shape_or.value(); OP_REQUIRES( ctx, !xla_shape.is_dynamic_dimension(split_dim), errors::Unimplemented("SplitV op doesn't yet support dynamic split " "dimension.")); OP_REQUIRES( ctx, (neg_one_dim == -1 && total_split_size == xla_shape.dimensions(split_dim)) || (neg_one_dim >= 0 && total_split_size <= xla_shape.dimensions(split_dim)), errors::InvalidArgument("Determined shape must either match " "input shape along split_dim exactly if " "fully specified, or be less than the size of " "the input along split_dim if not fully " "specified. Got: ", total_split_size)); if (neg_one_dim >= 0) { split_sizes[neg_one_dim] = input_shape.dim_size(split_dim) - total_split_size; } std::vector<int64_t> begin(input_shape.dims(), 0); auto dim_sizes = input_shape.dim_sizes(); std::vector<int64_t> limits(dim_sizes.begin(), dim_sizes.end()); std::vector<int64_t> strides(input_shape.dims(), 1); for (int i = 0; i < num_split; ++i) { TensorShape output_shape(input_shape); int slice_size = split_sizes[i]; output_shape.set_dim(split_dim, slice_size); limits[split_dim] = begin[split_dim] + slice_size; ctx->SetOutput(i, xla::Slice(input, begin, limits, strides)); begin[split_dim] = limits[split_dim]; } } }; REGISTER_XLA_OP(Name("SplitV") .CompileTimeConstantInput("split_dim") .CompileTimeConstantInput("size_splits"), SplitVOp); } }
#include <initializer_list> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/bfloat16.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* MakeGraph(int split_dim, int num_split, std::initializer_list<int64_t> chunk_size) { Graph* g = new Graph(OpRegistry::Global()); TensorShape in_shape(chunk_size); in_shape.set_dim(split_dim, in_shape.dim_size(split_dim) * num_split); Tensor in(DataTypeToEnum<float>::value, in_shape); in.flat<float>().setRandom(); Tensor split_dim_tensor = test::AsScalar<int32>(split_dim); Node* split; TF_CHECK_OK(NodeBuilder(g->NewName("split"), "Split") .Input(test::graph::Constant(g, split_dim_tensor)) .Input(test::graph::Constant(g, in)) .Attr("num_split", num_split) .Finalize(g, &split)); return g; } #define BM_SPLIT_1D(num_split, chunk_size) \ static void BM_Split_1d_##num_split##_##chunk_size( \ ::testing::benchmark::State& state) { \ auto label = \ strings::Printf("1-D %d chunks of %d each", num_split, chunk_size); \ state.SetLabel(label); \ auto g = MakeGraph( 0, num_split, {chunk_size}); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ num_split * chunk_size); \ } \ BENCHMARK(BM_Split_1d_##num_split##_##chunk_size)->UseRealTime(); #define BM_SPLIT_2D(split_dim, num_split, chunk_size0, chunk_size1) \ static void \ BM_Split_2d_##split_dim##_##num_split##_##chunk_size0##_##chunk_size1( \ ::testing::benchmark::State& state) { \ auto label = \ strings::Printf("2-D %d chunks in dim %d of (%d * %d) each", \ num_split, split_dim, chunk_size0, chunk_size1); \ state.SetLabel(label); \ auto g = 
MakeGraph(split_dim, num_split, {chunk_size0, chunk_size1}); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ num_split * chunk_size0 * chunk_size1); \ } \ BENCHMARK( \ BM_Split_2d_##split_dim##_##num_split##_##chunk_size0##_##chunk_size1) \ ->UseRealTime(); BM_SPLIT_1D(5, 1); BM_SPLIT_1D(262144, 1); BM_SPLIT_1D(1, 100000); BM_SPLIT_1D(5, 100000); BM_SPLIT_1D(10, 4194304); BM_SPLIT_1D(2, 4194304); BM_SPLIT_1D(100, 1024); BM_SPLIT_1D(32768, 1024); BM_SPLIT_2D(0, 1024, 1, 10); BM_SPLIT_2D(0, 1024, 10, 10); BM_SPLIT_2D(0, 512, 1024, 256); BM_SPLIT_2D(0, 20, 100000, 5); BM_SPLIT_2D(0, 2, 3, 524288); BM_SPLIT_2D(0, 100, 4096, 512); BM_SPLIT_2D(1, 1024, 1, 10); BM_SPLIT_2D(1, 1024, 10, 10); BM_SPLIT_2D(1, 512, 1024, 256); BM_SPLIT_2D(1, 20, 100000, 5); BM_SPLIT_2D(1, 2, 3, 524288); BM_SPLIT_2D(1, 100, 4096, 512); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/split_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/split_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
457e74e8-b51e-4693-99c9-01e8f9492e91
cpp
tensorflow/tensorflow
composite_tensor_variant
tensorflow/core/kernels/composite_tensor_variant.cc
tensorflow/core/kernels/composite_tensor_variant_test.cc
#include "tensorflow/core/kernels/composite_tensor_variant.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/protobuf/composite_tensor_variant.pb.h" #include "tensorflow/core/protobuf/struct.pb.h" namespace tensorflow { constexpr const char CompositeTensorVariant::kTypeName[]; CompositeTensorVariant::CompositeTensorVariant( const CompositeTensorVariantMetadata& metadata, absl::Span<Tensor> flat_components) : flat_components_(flat_components.begin(), flat_components.end()), metadata_(new CompositeTensorVariantMetadata()) { *metadata_ = metadata; } CompositeTensorVariant::CompositeTensorVariant() : metadata_(new CompositeTensorVariantMetadata()) {} CompositeTensorVariant::CompositeTensorVariant( const CompositeTensorVariant& other) : flat_components_(other.flat_components_), metadata_(new CompositeTensorVariantMetadata()) { *metadata_ = *other.metadata_; } void CompositeTensorVariant::Encode(VariantTensorData* data) const { data->set_type_name(TypeName()); metadata_->SerializeToString(&data->metadata_string()); for (const Tensor& tensor : flat_components_) { data->add_tensor(tensor); } } bool CompositeTensorVariant::Decode(const VariantTensorData& data) { if (!metadata_->ParseFromString(data.metadata_string())) { return false; } flat_components_ = data.tensors(); return true; } string CompositeTensorVariant::DebugString() const { string result("<CompositeTensorVariant type="); result.append(TypeSpecProto::TypeSpecClass_Name( metadata_->type_spec_proto().type_spec_class())); result.append(", components=["); for (const auto& tensor : flat_components_) { if (&tensor != &flat_components_[0]) { result.append(", "); } result.append(tensor.DebugString()); } result.append("]>"); return result; } REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompositeTensorVariant, CompositeTensorVariant::kTypeName); }
#include "tensorflow/core/kernels/composite_tensor_variant.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/composite_tensor_variant.pb.h" namespace tensorflow { namespace { constexpr const char* k2DRaggedTensorSpec = R"( type_spec_proto: { type_spec_class: RAGGED_TENSOR_SPEC type_state: { tuple_value: { values: [ {tensor_shape_value: {dim: [{size: -1}, {size: -1}]}}, # shape {tensor_dtype_value: DT_INT32}, # dtype {int64_value: 1}, # ragged_rank {tensor_dtype_value: DT_INT64} # row_splits_dtype ] } } })"; CompositeTensorVariant Make2DRaggedTensor(const std::vector<int32>& values, const std::vector<int64_t>& splits) { CompositeTensorVariantMetadata metadata; EXPECT_TRUE( protobuf::TextFormat::ParseFromString(k2DRaggedTensorSpec, &metadata)); std::vector<Tensor> components; components.push_back(test::AsTensor<int32>(values)); components.push_back(test::AsTensor<int64_t>(splits)); CompositeTensorVariant v(metadata, absl::MakeSpan(components)); return v; } TEST(CompositeTensorVariantTest, EncodeAndDecodeRagged) { CompositeTensorVariant v = Make2DRaggedTensor( {5, 5, 3, 4, 1, 8}, {0, 2, 3, 6}); Tensor t(DT_VARIANT, {}); t.flat<Variant>()(0) = v; auto* decoded = t.flat<Variant>()(0).get<CompositeTensorVariant>(); EXPECT_EQ(v.metadata().SerializeAsString(), decoded->metadata().SerializeAsString()); EXPECT_EQ(v.flat_components().size(), 2); test::ExpectTensorEqual<int32>(v.flat_components()[0], decoded->flat_components()[0]); test::ExpectTensorEqual<int64_t>(v.flat_components()[1], decoded->flat_components()[1]); } TEST(CompositeTensorVariantTest, DebugStringForDefaultConstructed) { CompositeTensorVariant v; EXPECT_EQ(v.DebugString(), "<CompositeTensorVariant type=UNKNOWN, components=[]>"); } TEST(CompositeTensorVariantTest, DebugStringForRagged) { CompositeTensorVariant v = 
Make2DRaggedTensor( {5, 5, 3, 4, 1}, {0, 2, 3, 5}); EXPECT_EQ(v.DebugString(), "<CompositeTensorVariant type=RAGGED_TENSOR_SPEC, " "components=[Tensor<type: int32 shape: [5] values: 5 5 3...>, " "Tensor<type: int64 shape: [4] values: 0 2 3...>]>"); } TEST(CompositeTensorVariantTest, TypeName) { CompositeTensorVariant v; EXPECT_EQ(v.TypeName(), "CompositeTensorVariant"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/composite_tensor_variant.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/composite_tensor_variant_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
13773e58-3b6b-4bb8-bc58-34d395c25d1b
cpp
tensorflow/tensorflow
decode_wav_op
tensorflow/core/kernels/decode_wav_op.cc
tensorflow/core/kernels/decode_wav_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/wav/wav_io.h" namespace tensorflow { class DecodeWavOp : public OpKernel { public: explicit DecodeWavOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("desired_channels", &desired_channels_)); OP_REQUIRES_OK(context, context->GetAttr("desired_samples", &desired_samples_)); } void Compute(OpKernelContext* context) override { const Tensor& contents = context->input(0); OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()), errors::InvalidArgument("contents must be scalar, got shape ", contents.shape().DebugString())); const string& wav_string = contents.scalar<tstring>()(); OP_REQUIRES(context, wav_string.size() <= std::numeric_limits<int>::max(), errors::InvalidArgument("WAV contents are too large for int: ", wav_string.size())); std::vector<float> decoded_samples; uint32 decoded_sample_count; uint16 decoded_channel_count; uint32 decoded_sample_rate; OP_REQUIRES_OK(context, wav::DecodeLin16WaveAsFloatVector( wav_string, &decoded_samples, &decoded_sample_count, &decoded_channel_count, &decoded_sample_rate)); int32_t output_sample_count; if (desired_samples_ == -1) { output_sample_count = decoded_sample_count; } else { output_sample_count = desired_samples_; } int32_t output_channel_count; if (desired_channels_ == -1) { output_channel_count = decoded_channel_count; } else { output_channel_count = desired_channels_; } Tensor* output = nullptr; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({output_sample_count, output_channel_count}), &output)); auto output_matrix = output->matrix<float>(); for (int sample = 0; sample < output_sample_count; ++sample) { for (int channel = 0; 
channel < output_channel_count; ++channel) { float output_value; if (sample >= decoded_sample_count) { output_value = 0.0f; } else { int source_channel; if (channel < decoded_channel_count) { source_channel = channel; } else { source_channel = decoded_channel_count - 1; } const int decoded_index = (sample * decoded_channel_count) + source_channel; output_value = decoded_samples[decoded_index]; } output_matrix(sample, channel) = output_value; } } Tensor* sample_rate_output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape({}), &sample_rate_output)); sample_rate_output->flat<int32>()(0) = decoded_sample_rate; } private: int32 desired_channels_; int32 desired_samples_; }; REGISTER_KERNEL_BUILDER(Name("DecodeWav").Device(DEVICE_CPU), DecodeWavOp); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace ops { namespace { TEST(DecodeWavOpTest, DecodeWavTest) { Scope root = Scope::NewRootScope(); std::vector<uint8> wav_data = { 'R', 'I', 'F', 'F', 44, 0, 0, 0, 'W', 'A', 'V', 'E', 'f', 'm', 't', ' ', 16, 0, 0, 0, 1, 0, 1, 0, 0x13, 0x37, 0, 0, 0x26, 0x6e, 0, 0, 2, 0, 16, 0, 'd', 'a', 't', 'a', 8, 0, 0, 0, 0, 0, 0xff, 0x3f, 0xff, 0x7f, 0x00, 0x80, }; Tensor content_tensor = test::AsScalar<tstring>(string(wav_data.begin(), wav_data.end())); Output content_op = Const(root.WithOpName("content_op"), Input::Initializer(content_tensor)); DecodeWav decode_wav_op = DecodeWav(root.WithOpName("decode_wav_op"), content_op); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {decode_wav_op.audio, decode_wav_op.sample_rate}, &outputs)); const Tensor& audio = outputs[0]; const int sample_rate = outputs[1].flat<int32>()(0); EXPECT_EQ(2, audio.dims()); EXPECT_EQ(1, audio.dim_size(1)); EXPECT_EQ(4, audio.dim_size(0)); EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f); EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f); EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f); EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f); EXPECT_EQ(14099, sample_rate); } TEST(DecodeWavOpTest, DecodeWav_ShapeFn) { ShapeInferenceTestOp op("DecodeWav"); INFER_ERROR("Shape must be rank 0 but is 
rank 1", op, "[1]"); TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav") .Input({"a", 0, DT_STRING}) .Finalize(&op.node_def)); INFER_OK(op, "[]", "[?,?];[]"); TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav") .Input({"a", 0, DT_STRING}) .Attr("desired_samples", 42) .Finalize(&op.node_def)); INFER_OK(op, "[]", "[42,?];[]"); TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav") .Input({"a", 0, DT_STRING}) .Attr("desired_samples", -2) .Finalize(&op.node_def)); INFER_ERROR("samples must be non-negative, got -2", op, "[]"); TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav") .Input({"a", 0, DT_STRING}) .Attr("desired_channels", 2) .Finalize(&op.node_def)); INFER_OK(op, "[]", "[?,2];[]"); TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav") .Input({"a", 0, DT_STRING}) .Attr("desired_channels", -2) .Finalize(&op.node_def)); INFER_ERROR("channels must be non-negative, got -2", op, "[]"); } TEST(DecodeWavOpTest, DecodeWavWithJunkChunkTest) { Scope root = Scope::NewRootScope(); std::vector<uint8> wav_data = { 'R', 'I', 'F', 'F', 76, 0, 0, 0, 'W', 'A', 'V', 'E', 'J', 'U', 'N', 'K', 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 'f', 'm', 't', ' ', 16, 0, 0, 0, 1, 0, 1, 0, 0x13, 0x37, 0, 0, 0x26, 0x6e, 0, 0, 2, 0, 16, 0, 'd', 'a', 't', 'a', 8, 0, 0, 0, 0, 0, 0xff, 0x3f, 0xff, 0x7f, 0x00, 0x80, }; Tensor content_tensor = test::AsScalar<tstring>(string(wav_data.begin(), wav_data.end())); Output content_op = Const(root.WithOpName("content_op"), Input::Initializer(content_tensor)); DecodeWav decode_wav_op = DecodeWav(root.WithOpName("decode_wav_op"), content_op); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {decode_wav_op.audio, decode_wav_op.sample_rate}, &outputs)); const Tensor& audio = outputs[0]; const int sample_rate = outputs[1].flat<int32>()(0); EXPECT_EQ(2, audio.dims()); EXPECT_EQ(1, audio.dim_size(1)); EXPECT_EQ(4, audio.dim_size(0)); 
EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f); EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f); EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f); EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f); EXPECT_EQ(14099, sample_rate); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/decode_wav_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/decode_wav_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1551d810-6514-45c0-92cb-2a4bb061fe08
cpp
tensorflow/tensorflow
identity_n_op
tensorflow/core/kernels/identity_n_op.cc
tensorflow/core/kernels/identity_n_op_test.cc
#include "tensorflow/core/kernels/identity_n_op.h" #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" namespace tensorflow { REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_DEFAULT), IdentityNOp); REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_TPU_SYSTEM), IdentityNOp); REGISTER_INPUT_COLOCATION_EXEMPTION("IdentityN"); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class IdentityNOpTest : public OpsTestBase { protected: Status Init(DataType input0_type, DataType input1_type) { TF_CHECK_OK(NodeDefBuilder("op", "IdentityN") .Input(FakeInput({input0_type, input1_type})) .Finalize(node_def())); return InitOp(); } }; TEST_F(IdentityNOpTest, Int32DoubleSuccess_6) { TF_ASSERT_OK(Init(DT_INT32, DT_DOUBLE)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<double>(TensorShape({6}), {7.3, 8.3, 9.3, 10.3, 11.3, 12.3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected0(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected0, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected0, *GetOutput(0)); Tensor expected1(allocator(), DT_DOUBLE, TensorShape({6})); test::FillValues<double>(&expected1, {7.3, 8.3, 9.3, 10.3, 11.3, 12.3}); test::ExpectTensorEqual<double>(expected1, *GetOutput(1)); } TEST_F(IdentityNOpTest, Int32Success_2_3) { TF_ASSERT_OK(Init(DT_INT32, DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<int32>(TensorShape({2, 3}), {7, 8, 9, 10, 11, 12}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2, 3})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); test::FillValues<int32>(&expected, {7, 8, 9, 10, 11, 12}); test::ExpectTensorEqual<int32>(expected, *GetOutput(1)); } TEST_F(IdentityNOpTest, StringInt32Success) { 
TF_ASSERT_OK(Init(DT_STRING, DT_INT32)); AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"}); AddInputFromArray<int32>(TensorShape({8}), {1, 3, 5, 7, 9, 11, 13, 15}); TF_ASSERT_OK(RunOpKernel()); Tensor expected0(allocator(), DT_STRING, TensorShape({6})); test::FillValues<tstring>(&expected0, {"A", "b", "C", "d", "E", "f"}); test::ExpectTensorEqual<tstring>(expected0, *GetOutput(0)); Tensor expected1(allocator(), DT_INT32, TensorShape({8})); test::FillValues<int32>(&expected1, {1, 3, 5, 7, 9, 11, 13, 15}); test::ExpectTensorEqual<int32>(expected1, *GetOutput(1)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_n_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_n_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
08ae7b8a-80ab-4ab8-b684-acc4b755c8a0
cpp
tensorflow/tensorflow
collective_nccl
tensorflow/core/kernels/collective_nccl.cc
tensorflow/core/kernels/collective_nccl_test.cc
#include "tensorflow/core/kernels/collective_nccl.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { NcclBase::NcclBase(CollectiveType type, const string& name) : type_(type), name_(name), col_ctx_(nullptr), col_params_(nullptr) {} Status NcclBase::InitializeCollectiveParams(CollectiveParams* col_params) { if (type_ != col_params->instance.type) { return errors::Internal("Expected initialized type ", type_, " to match type in CollectiveParams ", col_params->instance.type); } const char* expected_name; switch (type_) { case REDUCTION_COLLECTIVE: expected_name = "NcclReduce"; break; case BROADCAST_COLLECTIVE: expected_name = "NcclBroadcast"; break; case GATHER_COLLECTIVE: expected_name = "NcclGather"; break; case REDUCE_SCATTER_COLLECTIVE: expected_name = "NcclReduceScatter"; break; case ALL_TO_ALL_COLLECTIVE: expected_name = "NcclAllToAll"; break; default: return errors::Internal("Unexpected CollectiveType ", type_); } if (expected_name != col_params->instance.impl_details.collective_name) { return errors::Internal("Unexpected combination of collective type ", col_params->instance.type, " and collective name ", col_params->instance.impl_details.collective_name, ", expected name ", expected_name); } return OkStatus(); } Status NcclBase::InitializeCollectiveContext( std::shared_ptr<CollectiveContext> col_ctx) { col_ctx_ = col_ctx; col_params_ = col_ctx->col_params.get(); return collective_util::InitializeDeviceAndLocality( col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device, &col_ctx->device_locality); } } #endif
#ifdef GOOGLE_CUDA #include <algorithm> #include "absl/memory/memory.h" #include "tensorflow/core/common_runtime/base_collective_executor.h" #include "tensorflow/core/common_runtime/collective_test_util.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/device_resolver_local.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/process_util.h" #include "tensorflow/core/common_runtime/test_collective_executor_mgr.h" #include "tensorflow/core/framework/collective.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/unbounded_work_queue.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { static constexpr int kStepId = 10; std::unique_ptr<OpKernel> GetKernel(const NodeDef& node, DeviceBase* device) { Status status; std::unique_ptr<OpKernel> k = CreateOpKernel( DEVICE_GPU, device, device->GetAllocator(AllocatorAttributes()), node, TF_GRAPH_DEF_VERSION, &status); if (!status.ok()) LOG(FATAL) << status; return k; } std::unique_ptr<OpKernel> GetAdd(DeviceBase* device) { NodeDef node_def; NodeDefBuilder builder("add_node", "Add"); TF_CHECK_OK(builder.Attr("T", DT_FLOAT) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(&node_def)); return GetKernel(node_def, device); } std::unique_ptr<OpKernel> GetDiv(DeviceBase* device) { NodeDef node_def; NodeDefBuilder builder("add_node", 
"Div"); TF_CHECK_OK(builder.Attr("T", DT_FLOAT) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(&node_def)); return GetKernel(node_def, device); } class NcclTestBase : public ::testing::Test { protected: class DeviceInstance; NcclTestBase(CollectiveType collective_type, const string& collective_name) : collective_type_(collective_type), collective_name_(collective_name) {} void Init(const int num_ranks) { setenv("NCCL_DEBUG", "INFO", 1 ); setenv("NCCL_LAUNCH_MODE", "PARALLEL", 1 ); test_env_ = CreateCollectiveTestEnv( 1, num_ranks, DEVICE_GPU, true); for (int rank = 0; rank < num_ranks; ++rank) { instances_.push_back(std::make_unique<DeviceInstance>( rank, collective_name_, collective_type_, test_env_.get())); } } virtual void InitInput(Tensor* input, const int rank) = 0; virtual void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) = 0; virtual void InitDevice(DeviceInstance* di) = 0; virtual void RunCollectiveOnDevice(DeviceInstance* di) = 0; void RunCollective() { std::atomic<int> done(0); for (const auto& instance : instances_) { DeviceInstance* di = instance.get(); InitDevice(di); SchedClosure([this, di, &done] { RunCollectiveOnDevice(di); ++done; }); } while (done < static_cast<int>(instances_.size())) { Env::Default()->SleepForMicroseconds(1000); } } void RunTest(int num_ranks, int input_length) { Init(num_ranks); if (num_ranks > test_env_->device_mgr->NumDevices()) { LOG(WARNING) << "Skipping test because required " << num_ranks << " GPUs but found " << test_env_->device_mgr->NumDevices(); return; } for (int rank = 0; rank < num_ranks; ++rank) { instances_[rank]->InitTensor( DT_FLOAT, TensorShape({input_length}), [this, rank](Tensor* t) { InitInput(t, rank); }); } RunCollective(); for (int rank = 0; rank < instances_.size(); ++rank) { std::vector<float> expected; InitExpected(&expected, input_length, rank, num_ranks); if (VLOG_IS_ON(3)) { string str_buf; for (const auto& x : 
expected) { strings::StrAppend(&str_buf, " ", x); } VLOG(3) << "Expected output " << str_buf; } TF_ASSERT_OK(instances_[rank]->status_); VLOG(2) << "rank " << rank << " output " << &instances_[rank]->output_ << " buf " << DMAHelper::base(&instances_[rank]->output_); VLOG(3) << "rank " << rank << " got output tensor " << instances_[rank]->output_.DebugString( instances_[rank]->output_.NumElements()); test::ExpectTensorEqual<float>(test::AsTensor<float>(expected), instances_[rank]->output_); } } class DeviceInstance { public: DeviceInstance(int rank, const string& collective_name, CollectiveType collective_type, CollectiveTestEnv* test_env) : test_env_(test_env) { col_params_ = CreateCollectiveParams(*test_env_, rank, collective_name, collective_type, DT_FLOAT, TensorShape()); string device_name = col_params_->group.members[rank].device.name(); TF_CHECK_OK(test_env_->device_mgr->LookupDevice(device_name, &device_)) << "Could not find device " << device_name << " existing devices " << test_env_->device_mgr->DebugString(); merge_op_ = GetAdd(device_); final_op_ = GetDiv(device_); } void InitTensor(DataType dtype, const TensorShape& shape, const std::function<void(Tensor*)>& init_f) { input_ = Tensor(dtype, shape); init_f(&input_); if (VLOG_IS_ON(3)) { VLOG(3) << "input tensor " << input_.DebugString(shape.num_elements()); } else { VLOG(2) << "input tensor " << input_.DebugString(); } } void RunReduce() { output_ = input_; status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_, &input_, &output_); } void RunReduceScatter() { auto output_shape = input_.shape(); output_shape.set_dim( 0, output_shape.dim_size(0) / col_params_->group.group_size); output_ = Tensor(DT_FLOAT, output_shape); status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_, &input_, &output_); } void RunBroadcast() { output_ = input_; status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_, &input_, &output_); } void RunGather() { auto output_shape = 
input_.shape(); output_shape.set_dim( 0, output_shape.dim_size(0) * col_params_->group.group_size); output_ = Tensor(DT_FLOAT, output_shape); status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_, &input_, &output_); } void RunAllToAll() { output_ = Tensor(DT_FLOAT, input_.shape()); status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_, &input_, &output_); } CollectiveTestEnv* test_env_; Tensor input_; Tensor output_; Device* device_; core::RefCountPtr<CollectiveParams> col_params_; std::unique_ptr<OpKernel> merge_op_; std::unique_ptr<OpKernel> final_op_; Status status_; }; CollectiveType collective_type_; const string collective_name_; std::vector<std::unique_ptr<DeviceInstance>> instances_; mutex mu_; int32 op_counter_ TF_GUARDED_BY(mu_) = 0; std::unique_ptr<CollectiveTestEnv> test_env_; }; class NcclReducerTest : public NcclTestBase { protected: NcclReducerTest() : NcclTestBase(REDUCTION_COLLECTIVE, "NcclReduce") {} ~NcclReducerTest() override = default; void InitInput(Tensor* input, const int rank) override { for (size_t i = 0; i < input->NumElements(); ++i) { float value = pow(10, rank) * i; input->flat<float>()(i) = value; } } void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) override { expected->resize(tensor_length); for (int i = 0; i < tensor_length; ++i) { float expected_sum = 0.0; for (int rank = 0; rank < num_ranks; ++rank) { float value = pow(10, rank) * i; expected_sum += value; } (*expected)[i] = expected_sum / num_ranks; } } void InitDevice(DeviceInstance* di) override { di->col_params_->merge_op = di->merge_op_.get(); di->col_params_->final_op = di->final_op_.get(); } void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunReduce(); } }; class NcclReduceScattererTest : public NcclTestBase { protected: NcclReduceScattererTest() : NcclTestBase(REDUCE_SCATTER_COLLECTIVE, "NcclReduceScatter") {} ~NcclReduceScattererTest() override = 
default; void InitInput(Tensor* input, const int rank) override { for (size_t i = 0; i < input->NumElements(); ++i) { float value = pow(10, rank) * i; input->flat<float>()(i) = value; } } void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) override { const int output_length = tensor_length / num_ranks; expected->resize(output_length); for (int i = 0; i < output_length; ++i) { float expected_sum = 0.0; for (int rank = 0; rank < num_ranks; ++rank) { float value = pow(10, rank) * (i + current_rank * output_length); expected_sum += value; } (*expected)[i] = expected_sum / num_ranks; } } void InitDevice(DeviceInstance* di) override { di->col_params_->merge_op = di->merge_op_.get(); di->col_params_->final_op = di->final_op_.get(); } void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunReduceScatter(); } }; class NcclBroadcasterTest : public NcclTestBase { protected: NcclBroadcasterTest() : NcclTestBase(BROADCAST_COLLECTIVE, "NcclBroadcast") {} ~NcclBroadcasterTest() override = default; void InitInput(Tensor* input, const int rank) override { bool source = rank == source_rank_; for (size_t i = 0; i < input->NumElements(); ++i) { input->flat<float>()(i) = source ? 
static_cast<float>(i) : -1.0; } } void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) override { expected->resize(tensor_length); for (int i = 0; i < tensor_length; ++i) { (*expected)[i] = i; } } void InitDevice(DeviceInstance* di) override { di->col_params_->source_rank = source_rank_; di->col_params_->is_source = di->col_params_->default_rank == source_rank_; } void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunBroadcast(); } int source_rank_ = 0; }; class NcclGathererTest : public NcclTestBase { protected: NcclGathererTest() : NcclTestBase(GATHER_COLLECTIVE, "NcclGather") {} ~NcclGathererTest() override = default; void InitInput(Tensor* input, const int rank) override { for (size_t i = 0; i < input->NumElements(); ++i) { float value = pow(10, rank) * i; input->flat<float>()(i) = value; } } void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) override { expected->resize(tensor_length * num_ranks, -1); for (int rank = 0, i = 0; rank < num_ranks; ++rank) { for (int j = 0; j < tensor_length; ++j, ++i) { (*expected)[i] = pow(10, rank) * j; } } } void InitDevice(DeviceInstance* di) override {} void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunGather(); } int source_rank_ = 0; }; class NcclAllToAllTest : public NcclTestBase { protected: NcclAllToAllTest() : NcclTestBase(ALL_TO_ALL_COLLECTIVE, "NcclAllToAll") {} ~NcclAllToAllTest() override = default; void InitInput(Tensor* input, const int rank) override { for (size_t i = 0; i < input->NumElements(); ++i) { float value = rank * input->NumElements() + i; input->flat<float>()(i) = value; } } void InitExpected(std::vector<float>* expected, const int tensor_length, const int current_rank, const int num_ranks) override { expected->resize(tensor_length); const int part_size = tensor_length / num_ranks; for (int rank = 0, i = 0; rank < num_ranks; ++rank) { for (int j = 
0; j < part_size; ++j, ++i) { const int part_index = current_rank + rank * num_ranks; (*expected)[i] = part_index * part_size + j; } } } void InitDevice(DeviceInstance* di) override {} void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunAllToAll(); } int source_rank_ = 0; }; TEST_F(NcclReducerTest, Test2Dev16Len) { RunTest(2, 16); } TEST_F(NcclReducerTest, Test4Dev16Len) { RunTest(4, 16); } TEST_F(NcclReducerTest, Test8Dev16Len) { RunTest(8, 16); } TEST_F(NcclReducerTest, Test8Dev128Len) { RunTest(8, 128); } TEST_F(NcclReducerTest, Test8Dev1048576Len) { RunTest(8, 1048576); } TEST_F(NcclBroadcasterTest, Test2Dev16LenSrc0) { RunTest(2, 16); } TEST_F(NcclBroadcasterTest, Test4Dev16LenSrc1) { source_rank_ = 1; RunTest(4, 16); } TEST_F(NcclBroadcasterTest, Test8Dev16LenSrc7) { source_rank_ = 7; RunTest(8, 16); } TEST_F(NcclBroadcasterTest, Test8Dev128LenSrc0) { RunTest(8, 128); } TEST_F(NcclBroadcasterTest, Test8Dev1048576LenSrc0) { RunTest(8, 1048576); } TEST_F(NcclGathererTest, Test2Dev16Len) { RunTest(2, 16); } TEST_F(NcclGathererTest, Test4Dev16Len) { RunTest(4, 16); } TEST_F(NcclGathererTest, Test8Dev16Len) { RunTest(8, 16); } TEST_F(NcclGathererTest, Test8Dev128Len) { RunTest(8, 128); } TEST_F(NcclGathererTest, Test8Dev1048576Len) { RunTest(8, 1048576); } TEST_F(NcclReduceScattererTest, Test2Dev16Len) { RunTest(2, 16); } TEST_F(NcclReduceScattererTest, Test4Dev16Len) { RunTest(4, 16); } TEST_F(NcclReduceScattererTest, Test8Dev16Len) { RunTest(8, 16); } TEST_F(NcclReduceScattererTest, Test8Dev128Len) { RunTest(8, 128); } TEST_F(NcclReduceScattererTest, Test8Dev1048576Len) { RunTest(8, 1048576); } TEST_F(NcclAllToAllTest, Test2Dev16Len) { RunTest(2, 16); } TEST_F(NcclAllToAllTest, Test4Dev16Len) { RunTest(4, 16); } TEST_F(NcclAllToAllTest, Test8Dev16Len) { RunTest(8, 16); } TEST_F(NcclAllToAllTest, Test8Dev128Len) { RunTest(8, 128); } TEST_F(NcclAllToAllTest, Test8Dev1048576Len) { RunTest(8, 1048576); } } #endif
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/collective_nccl.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/collective_nccl_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
41f6164c-1583-47b4-8f87-68dc76563a64
cpp
tensorflow/tensorflow
cast_op
tensorflow/compiler/tf2xla/kernels/cast_op.cc
tensorflow/core/kernels/cast_op_test.cc
#include "tensorflow/compiler/tf2xla/lib/broadcast.h" #include "tensorflow/compiler/tf2xla/lib/util.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/primitive_util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { namespace { class CastOp : public XlaOpKernel { public: explicit CastOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("SrcT", &src_dtype_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("DstT", &dst_dtype_)); OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(src_dtype_, &src_type_)); OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dst_dtype_, &dst_type_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("Truncate", &use_truncation_)); } void Compile(XlaOpKernelContext* ctx) override { xla::XlaBuilder* builder = ctx->builder(); xla::XlaOp input = ctx->Input(0); xla::XlaOp output; if (src_dtype_ == dst_dtype_) { output = input; } else if (dst_dtype_ == DT_BOOL) { output = xla::Ne(input, XlaHelpers::Zero(builder, src_dtype_)); } else if (xla::primitive_util::IsComplexType(src_type_) && !xla::primitive_util::IsComplexType(dst_type_)) { output = xla::ConvertElementType(xla::Real(input), dst_type_); } else { if (use_truncation_) { OP_REQUIRES( ctx, xla::primitive_util::IsFloatingPointType(src_type_) && xla::primitive_util::IsFloatingPointType(dst_type_), errors::Unimplemented("Truncate attribute is only " "implemented for floating point datatypes.")); int mantissa_difference = xla::primitive_util::SignificandWidth(src_type_) - 
xla::primitive_util::SignificandWidth(dst_type_); OP_REQUIRES(ctx, mantissa_difference > 0, errors::Unimplemented( "Truncate attribute is only implemented in cases where " "dst datatype " "has fewer mantissa bits than the src datatype")); int src_bitwidth = xla::primitive_util::BitWidth(src_type_); int64_t mask = ~((1L << mantissa_difference) - 1); xla::PrimitiveType same_width_int = xla::primitive_util::UnsignedIntegralTypeForBitWidth(src_bitwidth); OP_REQUIRES(ctx, same_width_int != xla::PRIMITIVE_TYPE_INVALID, errors::Unimplemented("Unexpected type bitwidth")); input = xla::BitcastConvertType( xla::And( xla::BitcastConvertType(input, same_width_int), ::tensorflow::IntegerLiteral(builder, same_width_int, mask)), src_type_); } output = xla::ConvertElementType(input, dst_type_); } ctx->SetOutput(0, output); } protected: DataType src_dtype_, dst_dtype_; xla::PrimitiveType src_type_, dst_type_; bool use_truncation_; CastOp(const CastOp&) = delete; void operator=(const CastOp&) = delete; }; REGISTER_XLA_OP(Name("Cast"), CastOp); class BitcastOp : public XlaOpKernel { public: explicit BitcastOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &src_dtype_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("type", &dst_dtype_)); OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(src_dtype_, &src_type_)); OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dst_dtype_, &dst_type_)); } void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp input = ctx->Input(0); xla::XlaOp output; if (src_dtype_ == dst_dtype_) { output = input; ctx->SetOutput(0, output); return; } OP_REQUIRES(ctx, !xla::primitive_util::IsComplexType(src_type_) && !xla::primitive_util::IsComplexType(dst_type_), errors::Unimplemented("Complex types not supported.")); auto input_bit_width = xla::primitive_util::BitWidth(src_type_); auto output_bit_width = xla::primitive_util::BitWidth(dst_type_); OP_REQUIRES(ctx, output_bit_width % input_bit_width == 0 || input_bit_width % output_bit_width == 0, 
errors::InvalidArgument( "Neither bit width is a multiple of the other.")); output = xla::BitcastConvertType(input, dst_type_); ctx->SetOutput(0, output); } protected: DataType src_dtype_, dst_dtype_; xla::PrimitiveType src_type_, dst_type_; BitcastOp(const BitcastOp&) = delete; void operator=(const BitcastOp&) = delete; }; REGISTER_XLA_OP(Name("Bitcast"), BitcastOp); } }
#include <cstdint> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/bfloat16.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" using Eigen::half; namespace tensorflow { template <typename Src, typename Dst> static Graph* Cast(int num) { Graph* g = new Graph(OpRegistry::Global()); Tensor data(DataTypeToEnum<Src>::value, TensorShape({64, 64, num / (64 * 64)})); data.flat<Src>().setRandom(); test::graph::Cast(g, test::graph::Constant(g, data), DataTypeToEnum<Dst>::value); return g; } class CastOpTest : public OpsTestBase { protected: void MakeOp(DataType src, DataType dst, bool trunc) { if (trunc) { TF_EXPECT_OK(NodeDefBuilder("cast_op", "Cast") .Input(FakeInput(src)) .Attr("SrcT", src) .Attr("DstT", dst) .Attr("Truncate", true) .Finalize(node_def())); } else { TF_EXPECT_OK(NodeDefBuilder("cast_op", "Cast") .Input(FakeInput(src)) .Attr("SrcT", src) .Attr("DstT", dst) .Finalize(node_def())); } TF_EXPECT_OK(InitOp()); } template <typename INPUT, typename OUTPUT> void CheckCast(bool trunc) { DataType in_type = DataTypeToEnum<INPUT>::v(); DataType out_type = DataTypeToEnum<OUTPUT>::v(); MakeOp(in_type, out_type, trunc); AddInputFromArray<INPUT>(TensorShape({1, 2, 2, 1}), {INPUT(1), INPUT(2), INPUT(3), INPUT(4)}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), out_type, TensorShape({1, 2, 2, 1})); test::FillValues<OUTPUT>(&expected, {OUTPUT(1), OUTPUT(2), OUTPUT(3), OUTPUT(4)}); test::ExpectTensorEqual<OUTPUT>(expected, 
*GetOutput(0)); } }; #define TEST_CAST(in, out) \ TEST_F(CastOpTest, TestCast##_##in##_##out) { CheckCast<in, out>(false); } \ TEST_F(CastOpTest, TestCastTruncate_##_##in##_##out) { \ CheckCast<in, out>(true); \ } #define TEST_ALL_CASTS_FROM(in) \ TEST_CAST(in, uint8) \ TEST_CAST(in, uint16) \ TEST_CAST(in, uint32) \ TEST_CAST(in, uint64) \ TEST_CAST(in, int8) \ TEST_CAST(in, int16) \ TEST_CAST(in, int32) \ TEST_CAST(in, int64_t) \ TEST_CAST(in, half) \ TEST_CAST(in, float) \ TEST_CAST(in, double) \ TEST_CAST(in, bfloat16) \ TEST_CAST(in, quint8) \ TEST_CAST(in, qint8) \ TEST_CAST(in, qint32) \ TEST_CAST(in, qint16) \ TEST_CAST(in, quint16) TEST_ALL_CASTS_FROM(uint8) TEST_ALL_CASTS_FROM(uint16) TEST_ALL_CASTS_FROM(uint32) TEST_ALL_CASTS_FROM(uint64) TEST_ALL_CASTS_FROM(int16) TEST_ALL_CASTS_FROM(int32) TEST_ALL_CASTS_FROM(int64_t) TEST_ALL_CASTS_FROM(half) TEST_ALL_CASTS_FROM(float) TEST_ALL_CASTS_FROM(double) TEST_ALL_CASTS_FROM(bfloat16) TEST_ALL_CASTS_FROM(quint8) TEST_ALL_CASTS_FROM(qint8) TEST_ALL_CASTS_FROM(qint32) TEST_ALL_CASTS_FROM(qint16) TEST_ALL_CASTS_FROM(quint16) #undef TEST_ALL_CASTS_FROM #define TEST_INT_CASTS_FROM(in) \ TEST_CAST(in, uint8) \ TEST_CAST(in, uint16) \ TEST_CAST(in, uint32) \ TEST_CAST(in, uint64) \ TEST_CAST(in, int8) \ TEST_CAST(in, int16) \ TEST_CAST(in, int32) \ TEST_CAST(in, int64_t) #define TEST_INT_CASTS_TO(out) \ TEST_CAST(uint8, out) \ TEST_CAST(uint16, out) \ TEST_CAST(uint32, out) \ TEST_CAST(uint64, out) \ TEST_CAST(int8, out) \ TEST_CAST(int16, out) \ TEST_CAST(int32, out) \ TEST_CAST(int64_t, out) TEST_INT_CASTS_FROM(int4) TEST_INT_CASTS_FROM(uint4) TEST_INT_CASTS_TO(int4) TEST_INT_CASTS_TO(uint4) TEST_CAST(int4, int4) TEST_CAST(int4, uint4) TEST_CAST(uint4, int4) TEST_CAST(uint4, uint4) #undef TEST_INT_CASTS_FROM #undef TEST_INT_CASTS_TO #undef TEST_CAST static void BM_cpu_float_int64(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<float, int64_t>(num), false) 
.Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(int64_t))); } BENCHMARK(BM_cpu_float_int64)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_gpu_float_int64(::testing::benchmark::State& state) { const int num = state.range(0); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM test::Benchmark("gpu", Cast<float, int64_t>(num), false) .Run(state); #endif state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(int64_t))); } BENCHMARK(BM_gpu_float_int64)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_cpu_bool_float(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<bool, float>(num), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(bool) + sizeof(float))); } BENCHMARK(BM_cpu_bool_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_gpu_bool_float(::testing::benchmark::State& state) { const int num = state.range(0); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM test::Benchmark("gpu", Cast<bool, float>(num), false) .Run(state); #endif state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(bool) + sizeof(float))); } BENCHMARK(BM_gpu_bool_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_cpu_float_bfloat16(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<float, bfloat16>(num), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(bfloat16))); 
} BENCHMARK(BM_cpu_float_bfloat16)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_cpu_bfloat16_float(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<bfloat16, float>(num), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(bfloat16))); } BENCHMARK(BM_cpu_bfloat16_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_cpu_float_half(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<float, Eigen::half>(num), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(Eigen::half))); } BENCHMARK(BM_cpu_float_half)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_cpu_half_float(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", Cast<Eigen::half, float>(num), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(Eigen::half))); } BENCHMARK(BM_cpu_half_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_gpu_float_half(::testing::benchmark::State& state) { const int num = state.range(0); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM test::Benchmark("gpu", Cast<float, Eigen::half>(num), false) .Run(state); #endif state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(Eigen::half))); } BENCHMARK(BM_gpu_float_half)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); static void BM_gpu_half_float(::testing::benchmark::State& state) { const int num = state.range(0); #if GOOGLE_CUDA || 
TENSORFLOW_USE_ROCM test::Benchmark("gpu", Cast<Eigen::half, float>(num), false) .Run(state); #endif state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num * (sizeof(float) + sizeof(Eigen::half))); } BENCHMARK(BM_gpu_half_float)->UseRealTime()->Arg(64 << 10)->Arg(32 << 20); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/cast_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/cast_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
26c1a10a-6bbe-4bc1-ab95-53a6c2565069
cpp
tensorflow/tensorflow
unique_op
tensorflow/compiler/tf2xla/kernels/unique_op.cc
tensorflow/core/kernels/unique_op_test.cc
#include <sys/types.h> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/status/statusor.h" #include "absl/types/optional.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/comparison_util.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/comparators.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/builder/xla_computation.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { class UniqueOpBase : public XlaOpKernel { public: explicit UniqueOpBase(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { DataType dtype; OP_REQUIRES_OK(ctx, ctx->GetAttr("out_idx", &dtype)); OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dtype, &idx_type_)); } xla::XlaOp MoveAxis(xla::XlaOp a, int64_t from, int64_t to, const xla::Shape& input_shape) { std::vector<int64_t> permutation; permutation.reserve(input_shape.rank()); for (int64_t i = 0; i < input_shape.rank(); ++i) { permutation.push_back(i); } std::swap(permutation[from], permutation[to]); return xla::Transpose(a, permutation); } xla::XlaOp CumSumR1(XlaOpKernelContext* ctx, xla::XlaOp input, int64_t size) { auto init = xla::Zero(ctx->builder(), xla::S32); auto reducer = xla::CreateScalarAddComputation(xla::S32, ctx->builder()); return xla::ReduceWindowWithGeneralPadding( input, init, reducer, {size}, {1}, {}, {}, {{size - 1, 0}}); } xla::XlaOp RollingSelectR1(XlaOpKernelContext* ctx, xla::XlaOp data, xla::XlaOp mask, int64_t size) { xla::XlaComputation cond, body; const xla::Shape r1_shape = 
xla::ShapeUtil::MakeShape(xla::S32, {size}); const xla::Shape counter_shape = xla::ShapeUtil::MakeScalarShape(xla::S32); const xla::Shape& single_element_shape = counter_shape; auto loop_shape = xla::ShapeUtil::MakeTupleShape( {counter_shape, r1_shape, r1_shape, r1_shape}); { std::unique_ptr<xla::XlaBuilder> builder = ctx->builder()->CreateSubBuilder("loop_cond"); auto param = xla::Parameter(builder.get(), 0, loop_shape, "param"); auto counter = xla::GetTupleElement(param, 0); auto limit = xla::ConstantR0<int32_t>(builder.get(), size); xla::Lt(counter, limit); cond = builder->Build().value(); } { std::unique_ptr<xla::XlaBuilder> builder = ctx->builder()->CreateSubBuilder("loop_body"); auto param = xla::Parameter(builder.get(), 0, loop_shape, "param"); auto counter = xla::GetTupleElement(param, 0); auto data_stack = xla::GetTupleElement(param, 1); auto data = xla::DynamicSlice(data_stack, {counter}, {1}); data = xla::Reshape(single_element_shape, data); auto mask_stack = xla::GetTupleElement(param, 2); auto mask = xla::DynamicSlice(mask_stack, {counter}, {1}); mask = xla::Reshape(single_element_shape, mask); auto counter_minus = counter - xla::One(builder.get(), xla::S32); auto zero = xla::Zero(builder.get(), xla::S32); counter_minus = xla::Select(xla::Eq(counter, zero), zero, counter_minus); auto accum_stack = xla::GetTupleElement(param, 3); auto accum_minus = xla::DynamicSlice(accum_stack, {counter_minus}, {1}); accum_minus = xla::Reshape(single_element_shape, accum_minus); auto accum = xla::Select(xla::ConvertElementType(mask, xla::PRED), data, accum_minus); accum_stack = xla::DynamicUpdateSlice( accum_stack, xla::Reshape(accum, {1}), {counter}); counter = counter + xla::One(builder.get(), xla::S32); xla::Tuple(builder.get(), {counter, data_stack, mask_stack, accum_stack}); body = builder->Build().value(); } auto zero = xla::Zero(ctx->builder(), xla::S32); auto zero_broadcast = xla::Broadcast(zero, {size}); auto init = xla::Tuple(ctx->builder(), {zero, data, 
mask, zero_broadcast}); return xla::GetTupleElement(xla::While(cond, body, init), 3); } void CompileWithAxis(XlaOpKernelContext* ctx, int64_t axis) { xla::XlaOp input = ctx->Input(0); absl::StatusOr<xla::Shape> input_shape_or = ctx->builder()->GetShape(input); OP_REQUIRES_OK(ctx, input_shape_or.status()); auto input_shape = input_shape_or.value(); axis = axis < 0 ? axis + input_shape.rank() : axis; OP_REQUIRES(ctx, 0 <= axis && axis < input_shape.rank(), errors::InvalidArgument("axis has to be between [0, ", input_shape.rank(), ")")); auto aux = MoveAxis(input, axis, 0, input_shape); auto aux_shape = ctx->builder()->GetShape(aux).value(); int64_t leading_size = aux_shape.dimensions(0); int64_t product = 1; for (int64_t i = 1; i < aux_shape.rank(); ++i) { product *= aux_shape.dimensions(i); } aux = xla::Reshape(aux, {leading_size, product}); if (leading_size == 0) { auto result_data = xla::Reshape(aux, aux_shape.dimensions()); result_data = MoveAxis(result_data, 0, axis, aux_shape); ctx->SetOutput(0, result_data); ctx->SetOutput(1, xla::Iota(ctx->builder(), xla::S32, leading_size)); return; } std::vector<xla::XlaOp> sort_keys; sort_keys.reserve(product + 1); std::vector<xla::PrimitiveType> sort_types; sort_types.reserve(product + 1); for (int64_t i = 0; i < product; ++i) { xla::XlaOp slice = xla::SliceInDim(aux, i, i + 1, 1, 1); sort_keys.push_back(xla::Reshape(slice, {leading_size})); sort_types.push_back(input_shape.element_type()); } auto iota = xla::Iota(ctx->builder(), xla::S32, leading_size); sort_keys.push_back(iota); sort_types.push_back(xla::S32); std::vector<std::optional<xla::XlaOp (*)(xla::XlaOp, xla::XlaOp, absl::Span<const int64_t>)>> generators(sort_types.size(), xla::LtTotalOrder); auto lt_chain = xla::CreateScalarComparisonComputation( "UniqueV2Lt", sort_types, generators, ctx->builder()); auto sorted = xla::Sort(sort_keys, lt_chain, 0, true); xla::XlaOp perm; if (sort_keys.size() == 1) { perm = sorted; } else { perm = xla::GetTupleElement(sorted, 
sort_keys.size() - 1); } xla::GatherDimensionNumbers gather_dim_numbers; gather_dim_numbers.add_offset_dims(1); gather_dim_numbers.add_start_index_map(0); gather_dim_numbers.set_index_vector_dim(1); gather_dim_numbers.add_collapsed_slice_dims(0); auto permuted = xla::Gather(aux, perm, gather_dim_numbers, {1, product}); auto tail = xla::SliceInDim(permuted, 1, leading_size, 1, 0); auto init = xla::SliceInDim(permuted, 0, leading_size - 1, 1, 0); auto ne = xla::Compare(tail, init, xla::ComparisonDirection::kNe); auto reduce = xla::Reduce(ne, xla::ConstantR0(ctx->builder(), false), CreateScalarOrComputation(xla::PRED, ctx->builder()), {1}); auto mask = xla::ConvertElementType(reduce, xla::S32); mask = xla::PadInDim(mask, xla::One(ctx->builder(), xla::S32), 0, 1, 0); auto iperm = RollingSelectR1(ctx, perm, mask, leading_size); auto sort_by_iperm = xla::Sort({iperm, mask, perm}, xla::CreateScalarLtComputation({xla::S32, xla::S32, xla::S32}, ctx->builder()), 0, true); mask = xla::GetTupleElement(sort_by_iperm, 1); auto perm_sort = xla::GetTupleElement(sort_by_iperm, 2); auto dynamic_size = xla::ReduceAll( mask, xla::Zero(ctx->builder(), xla::S32), xla::CreateScalarAddComputation(xla::S32, ctx->builder())); auto mask_sort = xla::Sort( {mask, perm_sort}, xla::CreateScalarGtComputation({xla::S32, xla::S32}, ctx->builder()), 0, true); auto mask_permute = xla::GetTupleElement(mask_sort, 1); permuted = xla::Gather(aux, mask_permute, gather_dim_numbers, {1, product}); auto result_data = xla::Reshape(permuted, aux_shape.dimensions()); result_data = MoveAxis(result_data, 0, axis, aux_shape); result_data = xla::SetDimensionSize(result_data, dynamic_size, axis); ctx->SetOutput(0, result_data); auto imask = CumSumR1(ctx, mask, leading_size); imask = xla::Sub(imask, xla::One(ctx->builder(), xla::S32), {}); auto idx = xla::GetTupleElement( xla::Sort({perm_sort, imask}, xla::CreateScalarLtComputation({xla::S32, xla::S32}, ctx->builder())), 1); idx = xla::ConvertElementType(idx, 
idx_type_); ctx->SetOutput(1, idx); } private: xla::PrimitiveType idx_type_; }; class UniqueOp : public UniqueOpBase { public: explicit UniqueOp(OpKernelConstruction* ctx) : UniqueOpBase(ctx) {} void Compile(XlaOpKernelContext* ctx) override { CompileWithAxis(ctx, 0); } }; REGISTER_XLA_OP(Name("Unique"), UniqueOp); class UniqueV2Op : public UniqueOpBase { public: explicit UniqueV2Op(OpKernelConstruction* ctx) : UniqueOpBase(ctx) {} void Compile(XlaOpKernelContext* ctx) override { std::vector<int64_t> axises; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &axises)); OP_REQUIRES( ctx, axises.size() <= 1, xla::InvalidArgument("Only single axis unique op is supported")); int64_t axis; if (axises.empty()) { axis = 0; } else { axis = axises.front(); } CompileWithAxis(ctx, axis); } }; REGISTER_XLA_OP(Name("UniqueV2").CompileTimeConstantInput("axis"), UniqueV2Op); } }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { const int kMaxStrLen = 40; TensorProto GetRandomInt32TensorProto(int dim, int max_int) { TensorProto tensor_proto; tensor_proto.set_dtype(DT_INT32); tensor_proto.mutable_tensor_shape()->add_dim()->set_size(dim); tensor_proto.mutable_tensor_shape()->set_unknown_rank(false); for (int i = 0; i < dim; ++i) { const int int_val = std::rand() % max_int; tensor_proto.add_int_val(int_val); } return tensor_proto; } TensorProto GetRandomInt32TensorProtoWithRepeat(int dim, int repeat, int max_int) { TensorProto tensor_proto; tensor_proto.set_dtype(DT_INT32); tensor_proto.mutable_tensor_shape()->add_dim()->set_size(dim); tensor_proto.mutable_tensor_shape()->set_unknown_rank(false); for (int i = 0; i < dim; ++i) { const int int_val = std::rand() % max_int; for (int j = 0; j < repeat; ++j) { tensor_proto.add_int_val(int_val); } } return tensor_proto; } void BM_Unique_INT32(::testing::benchmark::State& state) { const int dim = state.range(0); const int max_int = state.range(1); Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_INT32, TensorShape({dim})); CHECK(input.FromProto(GetRandomInt32TensorProto(dim, max_int))); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Unique") .Input(test::graph::Constant(g, 
input)) .Attr("T", DT_INT32) .Finalize(g, &node)); FixupSourceAndSinkEdges(g); test::Benchmark("cpu", g, nullptr, nullptr, nullptr, "SINGLE_THREADED_EXECUTOR", false) .Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * dim * sizeof(int32)); } void BM_Unique_INT32_Repeat(::testing::benchmark::State& state) { const int dim = state.range(0); const int max_int = state.range(1); Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_INT32, TensorShape({dim * 200})); CHECK( input.FromProto(GetRandomInt32TensorProtoWithRepeat(dim, 200, max_int))); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Unique") .Input(test::graph::Constant(g, input)) .Attr("T", DT_INT32) .Finalize(g, &node)); FixupSourceAndSinkEdges(g); test::Benchmark("cpu", g, nullptr, nullptr, nullptr, "SINGLE_THREADED_EXECUTOR", false) .Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * dim * 200 * sizeof(int32)); } TensorProto GetRandomStringsTensorProto(int dim, int max_str_len) { TensorProto tensor_proto; tensor_proto.set_dtype(DT_STRING); tensor_proto.mutable_tensor_shape()->add_dim()->set_size(dim); tensor_proto.mutable_tensor_shape()->set_unknown_rank(false); for (int i = 0; i < dim; ++i) { const int len = std::rand() % max_str_len + 1; string rand_str; rand_str.resize(len); for (int j = 0; j < len; ++j) { rand_str[j] = static_cast<char>(j % 256); } tensor_proto.add_string_val(rand_str); } return tensor_proto; } void BM_Unique_STRING(::testing::benchmark::State& state) { const int dim = state.range(0); Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_STRING, TensorShape({dim})); CHECK(input.FromProto(GetRandomStringsTensorProto(dim, kMaxStrLen))); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Unique") .Input(test::graph::Constant(g, input)) .Attr("T", DT_STRING) .Finalize(g, &node)); FixupSourceAndSinkEdges(g); test::Benchmark("cpu", g, nullptr, nullptr, nullptr, "SINGLE_THREADED_EXECUTOR", false) .Run(state); 
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * dim * sizeof(tstring)); } BENCHMARK(BM_Unique_INT32) ->UseRealTime() ->ArgPair(32, 1024 * 1024) ->ArgPair(256, 1024 * 1024) ->ArgPair(1024, 1024 * 1024) ->ArgPair(4 * 1024, 1024 * 1024) ->ArgPair(16 * 1024, 1024 * 1024) ->ArgPair(64 * 1024, 1024 * 1024) ->ArgPair(1024 * 1024, 1024 * 1024) ->ArgPair(4 * 1024 * 1024, 1024 * 1024) ->ArgPair(32, 64 * 1024 * 1024) ->ArgPair(256, 64 * 1024 * 1024) ->ArgPair(1024, 64 * 1024 * 1024) ->ArgPair(4 * 1024, 64 * 1024 * 1024) ->ArgPair(16 * 1024, 64 * 1024 * 1024) ->ArgPair(64 * 1024, 64 * 1024 * 1024) ->ArgPair(1024 * 1024, 64 * 1024 * 1024) ->ArgPair(4 * 1024 * 1024, 64 * 1024 * 1024); BENCHMARK(BM_Unique_INT32_Repeat) ->UseRealTime() ->ArgPair(32, 1024 * 1024) ->ArgPair(256, 1024 * 1024) ->ArgPair(1024, 1024 * 1024) ->ArgPair(4 * 1024, 1024 * 1024) ->ArgPair(16 * 1024, 1024 * 1024) ->ArgPair(64 * 1024, 1024 * 1024) ->ArgPair(1024 * 1024, 1024 * 1024) ->ArgPair(4 * 1024 * 1024, 1024 * 1024) ->ArgPair(32, 32 * 1024 * 1024) ->ArgPair(256, 32 * 1024 * 1024) ->ArgPair(1024, 32 * 1024 * 1024) ->ArgPair(4 * 1024, 32 * 1024 * 1024) ->ArgPair(16 * 1024, 32 * 1024 * 1024) ->ArgPair(64 * 1024, 32 * 1024 * 1024) ->ArgPair(1024 * 1024, 32 * 1024 * 1024) ->ArgPair(32, 64 * 1024 * 1024) ->ArgPair(256, 64 * 1024 * 1024) ->ArgPair(1024, 64 * 1024 * 1024) ->ArgPair(4 * 1024, 64 * 1024 * 1024) ->ArgPair(16 * 1024, 64 * 1024 * 1024) ->ArgPair(64 * 1024, 64 * 1024 * 1024) ->ArgPair(1024 * 1024, 64 * 1024 * 1024); BENCHMARK(BM_Unique_STRING) ->UseRealTime() ->Arg(32) ->Arg(256) ->Arg(1024) ->Arg(4 * 1024) ->Arg(16 * 1024) ->Arg(64 * 1024) ->Arg(256 * 1024); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/unique_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/unique_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ff613e0f-18e1-4510-891c-0d95e457542e
cpp
tensorflow/tensorflow
bias_op
tensorflow/core/kernels/bias_op.cc
tensorflow/core/kernels/bias_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/bias_op.h" #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/redux_functor.h" #include "tensorflow/core/profiler/lib/scoped_annotation.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/tensor_format.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "xla/stream_executor/event_based_timer.h" #include "tensorflow/core/kernels/bias_op_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace { void GetBiasValueDims(const Tensor& value_tensor, TensorFormat data_format, int32* batch, int32* height, int32* width, int32* depth, int32* channel) { *batch = 1; *height = 1; *width = 1; *depth = 1; *channel = 1; if (data_format == FORMAT_NHWC) { int32_t channel_dim = value_tensor.dims() - 1; *channel = static_cast<int32>(value_tensor.dim_size(channel_dim)); for (int32_t i = 0; i < channel_dim; i++) { *batch *= static_cast<int32>(value_tensor.dim_size(i)); } } else if (data_format == FORMAT_NCHW) { *batch = static_cast<int32>(value_tensor.dim_size(0)); *channel = static_cast<int32>(value_tensor.dim_size(1)); *height = static_cast<int32>(value_tensor.dim_size(2)); if (value_tensor.dims() > 3) { *width = static_cast<int32>(value_tensor.dim_size(3)); } if (value_tensor.dims() > 4) { *depth = static_cast<int32>(value_tensor.dim_size(4)); } } } template <class T> struct AccumulatorType { typedef T type; }; template <> struct AccumulatorType<Eigen::half> { typedef float type; }; } template <typename Device, typename T> class BiasOp : public BinaryOp<T> { public: explicit BiasOp(OpKernelConstruction* 
context) : BinaryOp<T>(context) { string data_format; if (context->GetAttr("data_format", &data_format).ok()) { OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } else { data_format_ = FORMAT_NHWC; } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& bias = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()), errors::InvalidArgument("Input tensor must be at least 2D: ", input.shape())); OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()), errors::InvalidArgument("Biases must be 1D: ", bias.shape())); int channel_dim; if (data_format_ == FORMAT_NCHW) { channel_dim = 1; } else { channel_dim = input.shape().dims() - 1; } OP_REQUIRES(context, bias.shape().dim_size(0) == input.shape().dim_size(channel_dim), errors::InvalidArgument( "Must provide as many biases as the last dimension " "of the input tensor: ", bias.shape(), " vs. 
", input.shape())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, input.shape(), &output)); if (input.NumElements() == 0) return; functor::Bias<Device, T> functor; const Device& d = context->eigen_device<Device>(); if (data_format_ == FORMAT_NCHW && input.shape().dims() > 2) { functor(d, input.flat_inner_outer_dims<T, 2>(1), bias.flat_outer_dims<T, 2>(), output->flat_inner_outer_dims<T, 2>(1)); } else { functor(d, input.flat<T>(), bias.vec<T>(), output->flat<T>()); } } private: TensorFormat data_format_; }; #define REGISTER_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("BiasAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BiasOp<CPUDevice, type>); \ REGISTER_KERNEL_BUILDER( \ Name("BiasAddV1").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BiasOp<CPUDevice, type>); TF_CALL_NUMBER_TYPES(REGISTER_KERNEL); #undef REGISTER_KERNEL template <typename Device, typename T> class BiasGradOp : public OpKernel { public: explicit BiasGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; if (context->GetAttr("data_format", &data_format).ok()) { OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } else { data_format_ = FORMAT_NHWC; } } void Compute(OpKernelContext* context) override { const Tensor& output_backprop = context->input(0); OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(output_backprop.shape()), errors::InvalidArgument("Input tensor must be at least 2D: ", output_backprop.shape())); OP_REQUIRES( context, FastBoundsCheck(output_backprop.NumElements(), std::numeric_limits<int32>::max()), errors::InvalidArgument("BiasGrad requires tensor size <= int32 max")); int channel_dim; if (data_format_ == FORMAT_NCHW) { channel_dim = 1; } else { channel_dim = output_backprop.shape().dims() - 1; } Tensor* output = nullptr; TensorShape output_shape{output_backprop.shape().dim_size(channel_dim)}; OP_REQUIRES_OK(context, 
context->allocate_output(0, output_shape, &output)); if (output_backprop.NumElements() == 0) { output->template flat<T>().setZero(); } else { using AccumT = typename AccumulatorType<T>::type; if (data_format_ == FORMAT_NCHW) { const functor::ReduceMiddleDimensions< T, AccumT, T, Eigen::internal::scalar_sum_op<AccumT>, Eigen::internal::SumReducer<T>> redux; auto flat_outer = output_backprop.flat_outer_dims<T, 3>(); redux(context->eigen_device<Device>(), flat_outer.dimensions(), output_backprop, output, 1); } else { const functor::ReduceOuterDimensions< T, AccumT, T, Eigen::internal::scalar_sum_op<AccumT>> redux; auto flat_inner = output_backprop.flat_inner_dims<T, 2>(); redux(context->eigen_device<Device>(), flat_inner.dimensions(), output_backprop, output); } } } private: TensorFormat data_format_; }; #define REGISTER_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("BiasAddGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BiasGradOp<CPUDevice, type>); TF_CALL_NUMBER_TYPES(REGISTER_KERNEL); #undef REGISTER_KERNEL #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> class BiasOp<GPUDevice, T> : public BinaryOp<T> { public: typedef GPUDevice Device; explicit BiasOp(OpKernelConstruction* context) : BinaryOp<T>(context) { string data_format; if (context->GetAttr("data_format", &data_format).ok()) { OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } else { data_format_ = FORMAT_NHWC; } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& bias = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()), errors::InvalidArgument("Input tensor must be at least 2D: ", input.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()), errors::InvalidArgument("Biases must be 1D: ", bias.shape().DebugString())); int32_t batch, height, width, depth, channel; GetBiasValueDims(input, 
data_format_, &batch, &height, &width, &depth, &channel); OP_REQUIRES(context, bias.shape().dim_size(0) == channel, errors::InvalidArgument( "Must provide as many biases as the channel dimension " "of the input tensor: ", bias.shape().DebugString(), " vs. ", channel, " in ", input.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, input.shape(), &output)); if (input.NumElements() > 0) { BiasGPU<T>::compute(context->template eigen_device<Device>(), input.flat<T>().data(), bias.flat<T>().data(), output->flat<T>().data(), batch, width, height, depth, channel, data_format_); } } private: TensorFormat data_format_; }; #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("BiasAdd").Device(DEVICE_GPU).TypeConstraint<type>("T"), \ BiasOp<GPUDevice, type>); \ REGISTER_KERNEL_BUILDER( \ Name("BiasAddV1").Device(DEVICE_GPU).TypeConstraint<type>("T"), \ BiasOp<GPUDevice, type>); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL); REGISTER_GPU_KERNEL(int32); #undef REGISTER_GPU_KERNEL struct BiasGradAutotuneGroup { static string name() { return "BiasGrad"; } }; class BiasAddGradGPUConfig { public: BiasAddGradGPUConfig() : mode_(BiasAddGradGPUMode::kReduction) {} string ToString() const { if (mode_ == BiasAddGradGPUMode::kNative) { return "native CUDA kernel."; } if (mode_ == BiasAddGradGPUMode::kReduction) { return "cub reduction kernel."; } return "unknown kernel."; } BiasAddGradGPUMode get_mode() const { return mode_; } void set_mode(BiasAddGradGPUMode val) { mode_ = val; } bool operator==(const BiasAddGradGPUConfig& other) const { return this->mode_ == other.get_mode(); } bool operator!=(const BiasAddGradGPUConfig& other) const { return !(*this == other); } private: BiasAddGradGPUMode mode_; }; class BiasAddParams { public: using SpatialArray = gtl::InlinedVector<int64_t, 4>; BiasAddParams(const SpatialArray& in_shape, TensorFormat data_format, DataType dtype, int device_id) : 
in_shape_(in_shape), data_format_(data_format), dtype_(dtype), device_id_(device_id) { for (int64_t val : in_shape_) { hash_code_ = Hash64Combine(hash_code_, val); } hash_code_ = Hash64Combine(hash_code_, data_format); hash_code_ = Hash64Combine(hash_code_, dtype); hash_code_ = Hash64Combine(hash_code_, device_id); } bool operator==(const BiasAddParams& other) const { return this->get_data_as_tuple() == other.get_data_as_tuple(); } bool operator!=(const BiasAddParams& other) const { return !(*this == other); } uint64 hash() const { return hash_code_; } string ToString() const { return strings::StrCat( "(", absl::StrJoin(in_shape_, ", "), "), ", data_format_, ", ", dtype_, ", ", device_id_); } protected: using ParamsDataType = std::tuple<SpatialArray, TensorFormat, DataType, int>; ParamsDataType get_data_as_tuple() const { return std::make_tuple(in_shape_, data_format_, dtype_, device_id_); } uint64 hash_code_ = 0; private: SpatialArray in_shape_; TensorFormat data_format_; DataType dtype_; int device_id_; }; typedef AutotuneSingleton<BiasGradAutotuneGroup, BiasAddParams, BiasAddGradGPUConfig> AutotuneBiasGrad; template <typename T> class BiasGradOp<GPUDevice, T> : public OpKernel { public: typedef GPUDevice Device; explicit BiasGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; if (context->GetAttr("data_format", &data_format).ok()) { OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } else { data_format_ = FORMAT_NCHW; } } void ComputeWithCustomKernel(OpKernelContext* context, const Tensor& output_backprop, int32_t batch, int32_t width, int32_t height, int32_t depth, int32_t channel, Tensor* output) { BiasGradGPU<T>::compute(context->template eigen_device<Device>(), output_backprop.template flat<T>().data(), output->flat<T>().data(), batch, width, height, depth, channel, data_format_); } void ComputeWithReduceSum(OpKernelContext* context, const Tensor& output_backprop, 
int32_t batch, int32_t width, int32_t height, int32_t depth, int32_t channel, Tensor* output) { if (data_format_ == FORMAT_NCHW) { int32_t row_count = batch * channel; int32_t col_count = height * width * depth; Tensor temp_grad_outputs; TensorShape temp_grad_output_shape{row_count, col_count}; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, temp_grad_output_shape, &temp_grad_outputs)); BiasGradGPU<T>::DoRowReduction( context, temp_grad_outputs.flat<T>().data(), output_backprop.template flat<T>().data(), row_count, col_count); row_count = batch; col_count = channel; BiasGradGPU<T>::DoColReduction(context, output->flat<T>().data(), temp_grad_outputs.flat<T>().data(), row_count, col_count); } else { int32_t row_count = batch * height * width * depth; int32_t col_count = channel; BiasGradGPU<T>::DoColReduction( context, const_cast<T*>(output->flat<T>().data()), reinterpret_cast<const T*>(output_backprop.template flat<T>().data()), row_count, col_count); } } void Compute(OpKernelContext* context) override { const Tensor& output_backprop = context->input(0); OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(output_backprop.shape()), errors::InvalidArgument("Input tensor must be at least 2D: ", output_backprop.shape().DebugString())); int32_t batch, height, width, depth, channel; GetBiasValueDims(output_backprop, data_format_, &batch, &height, &width, &depth, &channel); Tensor* output = nullptr; TensorShape output_shape{channel}; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); if (channel == 0) return; auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); se::DeviceMemoryBase output_ptr(output->flat<T>().data(), output->NumElements() * sizeof(T)); OP_REQUIRES_OK(context, stream->MemZero(&output_ptr, output->NumElements() * sizeof(T))); if (output_backprop.NumElements() <= 0) return; if (OpDeterminismRequired()) { 
ComputeWithReduceSum(context, output_backprop, batch, width, height, depth, channel, output); return; } int device_id = stream->parent()->device_ordinal(); DataType dtype = output_backprop.dtype(); BiasAddParams bias_parameters = { {batch, height * width * depth, channel}, data_format_, dtype, device_id, }; BiasAddGradGPUConfig algo_config; if (!AutotuneBiasGrad::GetInstance()->Find(bias_parameters, &algo_config)) { profiler::ScopedAnnotation trace("bias_grad_autotuning"); BiasGradGPUProfileResult best_result; StatusOr<std::unique_ptr<se::EventBasedTimer>> timer = stream->CreateEventBasedTimer(false); OP_REQUIRES_OK(context, timer.status()); ComputeWithCustomKernel(context, output_backprop, batch, width, height, depth, channel, output); StatusOr<absl::Duration> bias_duration = timer.value()->GetElapsedDuration(); OP_REQUIRES_OK(context, bias_duration.status()); int64_t elapsed_microseconds = absl::ToInt64Microseconds(*bias_duration); VLOG(1) << "BiasAddGrad " << bias_parameters.ToString() << " Native algo latency: " << elapsed_microseconds << "us"; if (elapsed_microseconds < best_result.elapsed_time()) { best_result.set_algorithm(BiasAddGradGPUMode::kNative); best_result.set_elapsed_time(elapsed_microseconds); } StatusOr<std::unique_ptr<se::EventBasedTimer>> reduction_timer = stream->CreateEventBasedTimer(false); OP_REQUIRES_OK(context, reduction_timer.status()); ComputeWithReduceSum(context, output_backprop, batch, width, height, depth, channel, output); StatusOr<absl::Duration> reduction_duration = reduction_timer.value()->GetElapsedDuration(); OP_REQUIRES_OK(context, reduction_duration.status()); elapsed_microseconds += absl::ToInt64Microseconds(*reduction_duration); VLOG(1) << "BiasAddGrad " << bias_parameters.ToString() << " Reduction algo latency: " << elapsed_microseconds; if (elapsed_microseconds < best_result.elapsed_time()) { best_result.set_algorithm(BiasAddGradGPUMode::kReduction); best_result.set_elapsed_time(elapsed_microseconds); } 
algo_config.set_mode(best_result.algorithm()); AutotuneBiasGrad::GetInstance()->Insert(bias_parameters, algo_config); return; } if (algo_config.get_mode() == BiasAddGradGPUMode::kReduction) { ComputeWithReduceSum(context, output_backprop, batch, width, height, depth, channel, output); } else { ComputeWithCustomKernel(context, output_backprop, batch, width, height, depth, channel, output); } } private: TensorFormat data_format_; }; #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER( \ Name("BiasAddGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \ BiasGradOp<GPUDevice, type>); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL #endif }
#include "tensorflow/core/kernels/bias_op.h" #include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* BiasAdd(int d0, int d1, int d2, int d3) { auto* g = new Graph(OpRegistry::Global()); Tensor input(DT_FLOAT, TensorShape({d0, d1, d2, d3})); Tensor bias(DT_FLOAT, TensorShape({d3})); input.flat<float>().setRandom(); bias.flat<float>().setRandom(); test::graph::Binary(g, "BiasAdd", test::graph::Constant(g, input), test::graph::Constant(g, bias)); return g; } static Graph* BiasAddGrad(int d0, int d1, int d2, int d3) { auto* g = new Graph(OpRegistry::Global()); Tensor out_backprop(DT_FLOAT, TensorShape({d0, d1, d2, d3})); out_backprop.flat<float>().setRandom(); test::graph::Unary(g, "BiasAddGrad", test::graph::Constant(g, out_backprop)); return g; } #define BM_BiasAddNHWC(N, W, H, C, DEVICE) \ static void BM_BiasAddNHWC##_##N##_##H##_##W##_##C##_##DEVICE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, BiasAdd(N, H, W, C), false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * H * \ W * C); \ } \ BENCHMARK(BM_BiasAddNHWC##_##N##_##H##_##W##_##C##_##DEVICE)->UseRealTime(); #define BM_BiasAddGradNHWC(N, W, H, C, DEVICE) \ static void BM_BiasAddGradNHWC##_##N##_##H##_##W##_##C##_##DEVICE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, BiasAddGrad(N, H, W, C), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * H * \ W * C); \ } \ BENCHMARK(BM_BiasAddGradNHWC##_##N##_##H##_##W##_##C##_##DEVICE) \ ->UseRealTime(); BM_BiasAddNHWC(32, 32, 32, 128, cpu); BM_BiasAddNHWC(32, 32, 32, 256, cpu); BM_BiasAddNHWC(32, 32, 32, 512, cpu); BM_BiasAddNHWC(32, 32, 32, 1024, cpu); BM_BiasAddNHWC(32, 64, 64, 128, cpu); BM_BiasAddNHWC(32, 64, 64, 256, cpu); 
BM_BiasAddNHWC(32, 64, 64, 512, cpu); BM_BiasAddNHWC(32, 64, 64, 1024, cpu); BM_BiasAddGradNHWC(32, 32, 32, 128, cpu); BM_BiasAddGradNHWC(32, 32, 32, 256, cpu); BM_BiasAddGradNHWC(32, 32, 32, 512, cpu); BM_BiasAddGradNHWC(32, 32, 32, 1024, cpu); BM_BiasAddGradNHWC(32, 64, 64, 128, cpu); BM_BiasAddGradNHWC(32, 64, 64, 256, cpu); BM_BiasAddGradNHWC(32, 64, 64, 512, cpu); BM_BiasAddGradNHWC(32, 64, 64, 1024, cpu); #ifdef GOOGLE_CUDA BM_BiasAddGradNHWC(32, 32, 32, 128, gpu); BM_BiasAddGradNHWC(32, 32, 32, 256, gpu); BM_BiasAddGradNHWC(32, 32, 32, 512, gpu); BM_BiasAddGradNHWC(32, 32, 32, 1024, gpu); BM_BiasAddGradNHWC(32, 64, 64, 128, gpu); BM_BiasAddGradNHWC(32, 64, 64, 256, gpu); BM_BiasAddGradNHWC(32, 64, 64, 512, gpu); BM_BiasAddGradNHWC(32, 64, 64, 1024, gpu); #endif }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/bias_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/bias_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b811e6c8-d09e-4d7a-93b9-742eb93dce0e
cpp
tensorflow/tensorflow
stochastic_cast_op
tensorflow/compiler/tf2xla/kernels/stochastic_cast_op.cc
tensorflow/core/ops/stochastic_cast_op_test.cc
#include "tensorflow/core/kernels/stochastic_cast_op.h" #include <string> #include "absl/status/statusor.h" #include "tensorflow/compiler/tf2xla/kernels/random_ops_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" namespace tensorflow { namespace { class StochasticCastToInt : public XlaOpKernel { static constexpr int kInputIndex = 0; public: explicit StochasticCastToInt(OpKernelConstruction* ctx) : XlaOpKernel(ctx), device_type_string_(ctx->device_type().type_string()) { OP_REQUIRES_OK(ctx, ctx->GetAttr("Tin", &from_type_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("Tout", &to_type_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape shape; shape = ctx->InputShape(kInputIndex); absl::StatusOr<xla::XlaOp> randoms_or = BuildUniformRandoms( ctx, from_type_, device_type_string_, shape, xla::Zero, xla::One); OP_REQUIRES_OK(ctx, randoms_or.status()); xla::XlaOp input = ctx->Input(kInputIndex); if (from_type_ == DT_BFLOAT16) { input = xla::ConvertElementType(input, xla::F32); } xla::XlaOp result = xla::Select( xla::Lt(input, xla::ScalarLike(input, 0)), xla::Floor(xla::Add(input, randoms_or.value())), xla::Floor(xla::Sub(xla::Add(input, xla::ScalarLike(input, 1)), randoms_or.value()))); result = xla::Select(xla::Eq(input, xla::Floor(input)), input, result); xla::PrimitiveType to_type; OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(to_type_, &to_type)); result = xla::ConvertElementType(result, to_type); ctx->SetOutput(0, result); } private: DataType from_type_; DataType to_type_; std::string device_type_string_; }; 
REGISTER_XLA_OP(Name("StochasticCastToInt") .CompileTimeConstantInput("alg") .TypeConstraint("Tin", {DT_DOUBLE, DT_FLOAT, DT_HALF, DT_BFLOAT16}) .TypeConstraint("Tout", {DT_INT32, DT_INT16, DT_INT8}), StochasticCastToInt); } }
#include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(StochasticCastOpTest, StochasticCastToIntShapeInference) { ShapeInferenceTestOp op("StochasticCastToInt"); INFER_OK(op, "[4,2];[1];[1];[]", "in0"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1,2];[1];[]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1];[1,2];[]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[4,2];[1];[1];[1]"); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/stochastic_cast_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/stochastic_cast_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4c23af1c-2ff4-49b9-9dfc-748298738328
cpp
tensorflow/tensorflow
substr_op
tensorflow/core/kernels/substr_op.cc
tensorflow/core/kernels/substr_op_test.cc
#include <cstddef> #include <cstdlib> #include <string> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/string_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { template <typename T> class SubstrOp : public OpKernel { public: explicit SubstrOp(OpKernelConstruction* ctx) : OpKernel(ctx) { string unit; OP_REQUIRES_OK(ctx, ctx->GetAttr("unit", &unit)); OP_REQUIRES_OK(ctx, ParseCharUnit(unit, &unit_)); } void Compute(OpKernelContext* context) override { const Tensor& input_tensor = context->input(0); const Tensor& pos_tensor = context->input(1); const Tensor& len_tensor = context->input(2); const TensorShape& input_shape = input_tensor.shape(); const TensorShape& pos_shape = pos_tensor.shape(); const TensorShape& len_shape = len_tensor.shape(); OP_REQUIRES(context, (pos_shape == len_shape), errors::InvalidArgument( "pos and len should have the same shape, got: ", pos_shape.DebugString(), " vs. 
", len_shape.DebugString())); bool is_scalar = TensorShapeUtils::IsScalar(pos_shape); if (is_scalar || input_shape == pos_shape) { auto input = input_tensor.flat<tstring>(); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output("output", input_tensor.shape(), &output_tensor)); auto output = output_tensor->flat<tstring>(); if (is_scalar) { const T pos = tensorflow::internal::SubtleMustCopy(pos_tensor.scalar<T>()()); const T len = tensorflow::internal::SubtleMustCopy(len_tensor.scalar<T>()()); for (size_t i = 0; i < input_tensor.NumElements(); ++i) { StringPiece in(input(i)); T byte_pos = pos; T byte_len = len; switch (unit_) { case CharUnit::UTF8_CHAR: OP_REQUIRES( context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len), errors::InvalidArgument("pos ", pos, " out of range for ", "string at index ", i)); break; case CharUnit::BYTE: byte_pos = AdjustedPosIndex(byte_pos, in); OP_REQUIRES( context, FastBoundsCheck(byte_pos, in.size() + 1), errors::InvalidArgument("pos ", pos, " out of range for ", "string b'", in, "' at index ", i)); } StringPiece sub_in = in.substr(byte_pos, byte_len); output(i).assign(sub_in.data(), sub_in.size()); } } else { auto pos_flat = pos_tensor.flat<T>(); auto len_flat = len_tensor.flat<T>(); for (size_t i = 0; i < input_tensor.NumElements(); ++i) { StringPiece in(input(i)); const T pos = tensorflow::internal::SubtleMustCopy(pos_flat(i)); const T len = tensorflow::internal::SubtleMustCopy(len_flat(i)); T byte_pos = pos; T byte_len = len; switch (unit_) { case CharUnit::UTF8_CHAR: OP_REQUIRES( context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len), errors::InvalidArgument("pos ", pos, " out of range for ", "string at index ", i)); break; case CharUnit::BYTE: byte_pos = AdjustedPosIndex(byte_pos, in); OP_REQUIRES( context, FastBoundsCheck(byte_pos, in.size() + 1), errors::InvalidArgument("pos ", pos, " out of range for ", "string b'", in, "' at index ", i)); } StringPiece sub_in = in.substr(byte_pos, byte_len); 
output(i).assign(sub_in.data(), sub_in.size()); } } } else { BCast bcast(BCast::FromShape(input_shape), BCast::FromShape(pos_shape), false); OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument( "Incompatible shapes: ", input_shape.DebugString(), " vs. ", pos_shape.DebugString())); TensorShape output_shape = BCast::ToShape(bcast.result_shape()); int ndims = output_shape.dims(); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output("output", output_shape, &output_tensor)); switch (ndims) { case 1: { auto input = input_tensor.shaped<tstring, 1>(bcast.x_reshape()); auto output = output_tensor->shaped<tstring, 1>(bcast.result_shape()); auto pos_shaped = pos_tensor.shaped<T, 1>(bcast.y_reshape()); auto len_shaped = len_tensor.shaped<T, 1>(bcast.y_reshape()); Tensor pos_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &pos_buffer)); typename TTypes<T, 1>::Tensor pos_bcast( pos_buffer.shaped<T, 1>(bcast.result_shape())); pos_bcast = pos_shaped.broadcast(BCast::ToIndexArray<1>(bcast.y_bcast())); Tensor len_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &len_buffer)); typename TTypes<T, 1>::Tensor len_bcast( len_buffer.shaped<T, 1>(bcast.result_shape())); len_bcast = len_shaped.broadcast(BCast::ToIndexArray<1>(bcast.y_bcast())); for (int i = 0; i < output_shape.dim_size(0); ++i) { StringPiece in(input(input.dimension(0) > 1 ? 
i : 0)); const T pos = tensorflow::internal::SubtleMustCopy(pos_bcast(i)); const T len = tensorflow::internal::SubtleMustCopy(len_bcast(i)); T byte_pos = pos; T byte_len = len; switch (unit_) { case CharUnit::UTF8_CHAR: OP_REQUIRES( context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len), errors::InvalidArgument("pos ", pos, " out of range for ", "string at index ", i)); break; case CharUnit::BYTE: byte_pos = AdjustedPosIndex(byte_pos, in); OP_REQUIRES( context, FastBoundsCheck(byte_pos, in.size() + 1), errors::InvalidArgument("pos ", pos, " out of range for ", "string b'", in, "' at index ", i)); } StringPiece sub_in = in.substr(byte_pos, byte_len); output(i).assign(sub_in.data(), sub_in.size()); } break; } case 2: { auto input = input_tensor.shaped<tstring, 2>(bcast.x_reshape()); auto output = output_tensor->shaped<tstring, 2>(bcast.result_shape()); auto pos_shaped = pos_tensor.shaped<T, 2>(bcast.y_reshape()); auto len_shaped = len_tensor.shaped<T, 2>(bcast.y_reshape()); Tensor pos_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &pos_buffer)); typename TTypes<T, 2>::Tensor pos_bcast( pos_buffer.shaped<T, 2>(bcast.result_shape())); pos_bcast = pos_shaped.broadcast(BCast::ToIndexArray<2>(bcast.y_bcast())); Tensor len_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &len_buffer)); typename TTypes<T, 2>::Tensor len_bcast( len_buffer.shaped<T, 2>(bcast.result_shape())); len_bcast = len_shaped.broadcast(BCast::ToIndexArray<2>(bcast.y_bcast())); for (int i = 0; i < output_shape.dim_size(0); ++i) { for (int j = 0; j < output_shape.dim_size(1); ++j) { StringPiece in(input(input.dimension(0) > 1 ? i : 0, input.dimension(1) > 1 ? 
j : 0)); const T pos = tensorflow::internal::SubtleMustCopy(pos_bcast(i, j)); const T len = tensorflow::internal::SubtleMustCopy(len_bcast(i, j)); T byte_pos = pos; T byte_len = len; switch (unit_) { case CharUnit::UTF8_CHAR: OP_REQUIRES( context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len), errors::InvalidArgument("pos ", pos, " out of range for ", "string at index ", i)); break; case CharUnit::BYTE: byte_pos = AdjustedPosIndex(byte_pos, in); OP_REQUIRES( context, FastBoundsCheck(byte_pos, in.size() + 1), errors::InvalidArgument("pos ", pos, " out of range for ", "string b'", in, "' at index (", i, ", ", j, ")")); } StringPiece sub_in = in.substr(byte_pos, byte_len); output(i, j).assign(sub_in.data(), sub_in.size()); } } break; } default: { context->SetStatus(errors::Unimplemented( "Substr broadcast not implemented for ", ndims, " dimensions")); } } } } private: static inline T AdjustedPosIndex(const T pos_requested, const StringPiece s) { if (pos_requested < 0) { return s.size() + pos_requested; } return pos_requested; } static inline bool UpdatePosAndLenForUtf8(const StringPiece in, T* pos, T* len) { if (*pos >= 0) { return UpdatePositivePosAndLenForUtf8(in, *pos, *len, pos, len); } else { return UpdateNegativePosAndLenForUtf8(in, *pos, *len, pos, len); } } static bool UpdatePositivePosAndLenForUtf8(const StringPiece in, const T pos, const T len, T* char_pos, T* char_len) { *char_pos = 0; if (!ForwardNUTF8CharPositions(in, pos, char_pos)) { return false; } *char_len = *char_pos; ForwardNUTF8CharPositions(in, len, char_len); *char_len = *char_len - *char_pos; return true; } static bool UpdateNegativePosAndLenForUtf8(const StringPiece in, const T pos, const T len, T* char_pos, T* char_len) { *char_len = in.size(); T utf8_chars_to_skip = -pos - len; if (utf8_chars_to_skip < 0) { utf8_chars_to_skip = 0; } if (!BackNUTF8CharPositions(in, utf8_chars_to_skip, char_len)) { return false; } *char_pos = *char_len; if (!BackNUTF8CharPositions(in, -pos - 
utf8_chars_to_skip, char_pos)) { return false; } *char_len = *char_len - *char_pos; return true; } CharUnit unit_ = CharUnit::BYTE; }; #define REGISTER_SUBSTR(type) \ REGISTER_KERNEL_BUILDER( \ Name("Substr").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ SubstrOp<type>); REGISTER_SUBSTR(int32); REGISTER_SUBSTR(int64_t); }
#include <string> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { const char* ascii_lines[] = { "**TensorFlow** is an open source software library for numerical " "computation using data flow graphs.", "The graph nodes represent mathematical operations, while the graph edges " "represent the multidimensional data arrays (tensors) that flow between " "them.", "This flexible architecture enables you to deploy computation to one or " "more CPUs or GPUs in a desktop, server, or mobile device without " "rewriting code.", "TensorFlow also includes " "[TensorBoard](https: "summaries_and_tensorboard), a data visualization toolkit.", "TensorFlow was originally developed by researchers and engineers working " "on the Google Brain team within Google's Machine Intelligence Research " "organization for the purposes of conducting machine learning and deep " "neural networks research.", "The system 
is general enough to be applicable in a wide variety of other " "domains, as well.", "TensorFlow provides stable Python API and C APIs as well as without API " "backwards compatibility guarantee like C++, Go, Java, JavaScript and " "Swift."}; const char* unicode_lines[] = { "TensorFlow\xe6\x98\xaf\xe4\xb8\x80\xe4\xb8\xaa\xe4\xbd\xbf\xe7\x94\xa8\xe6" "\x95\xb0\xe6\x8d\xae\xe6\xb5\x81\xe5\x9b\xbe\xe8\xbf\x9b\xe8\xa1\x8c\xe6" "\x95\xb0\xe5\x80\xbc\xe8\xae\xa1\xe7\xae\x97\xe7\x9a\x84\xe5\xbc\x80\xe6" "\xba\x90\xe8\xbd\xaf\xe4\xbb\xb6\xe5\xba\x93\xe3\x80\x82", "\xe5\x9b\xbe\xe5\xbd\xa2\xe8\x8a\x82\xe7\x82\xb9\xe8\xa1\xa8\xe7\xa4\xba" "\xe6\x95\xb0\xe5\xad\xa6\xe8\xbf\x90\xe7\xae\x97\xef\xbc\x8c\xe8\x80\x8c" "\xe5\x9b\xbe\xe5\xbd\xa2\xe8\xbe\xb9\xe7\xbc\x98\xe8\xa1\xa8\xe7\xa4\xba" "\xe5\x9c\xa8\xe5\xae\x83\xe4\xbb\xac\xe4\xb9\x8b\xe9\x97\xb4\xe6\xb5\x81" "\xe5\x8a\xa8\xe7\x9a\x84\xe5\xa4\x9a\xe7\xbb\xb4\xe6\x95\xb0\xe6\x8d\xae" "\xe9\x98\xb5\xe5\x88\x97\xef\xbc\x88\xe5\xbc\xa0\xe9\x87\x8f\xef\xbc\x89" "\xe3\x80\x82", "\xe8\xbf\x99\xe7\xa7\x8d\xe7\x81\xb5\xe6\xb4\xbb\xe7\x9a\x84\xe4\xbd\x93" "\xe7\xb3\xbb\xe7\xbb\x93\xe6\x9e\x84\xe4\xbd\xbf\xe6\x82\xa8\xe5\x8f\xaf" "\xe4\xbb\xa5\xe5\xb0\x86\xe8\xae\xa1\xe7\xae\x97\xe9\x83\xa8\xe7\xbd\xb2" "\xe5\x88\xb0\xe6\xa1\x8c\xe9\x9d\xa2\xef\xbc\x8c\xe6\x9c\x8d\xe5\x8a\xa1" "\xe5\x99\xa8\xe6\x88\x96\xe7\xa7\xbb\xe5\x8a\xa8\xe8\xae\xbe\xe5\xa4\x87" "\xe4\xb8\xad\xe7\x9a\x84\xe4\xb8\x80\xe4\xb8\xaa\xe6\x88\x96\xe5\xa4\x9a" "\xe4\xb8\xaa CPU\xe6\x88\x96GPU\xef\xbc\x8c\xe8\x80\x8c\xe6\x97\xa0\xe9" "\x9c\x80\xe9\x87\x8d\xe5\x86\x99\xe4\xbb\xa3\xe7\xa0\x81\xe3\x80\x82", "TensorFlow\xe8\xbf\x98\xe5\x8c\x85\xe6\x8b\xac[TensorBoard]\xef\xbc\x88" "https: "\xbc\x8c\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\x80\xe4\xb8\xaa\xe6\x95\xb0\xe6" "\x8d\xae\xe5\x8f\xaf\xe8\xa7\x86\xe5\x8c\x96\xe5\xb7\xa5\xe5\x85\xb7\xe5" "\x8c\x85\xe3\x80\x82", "TensorFlow\xe6\x9c\x80\xe5\x88\x9d\xe6\x98\xaf\xe7\x94\xb1\xe7\xa0\x94\xe7" 
"\xa9\xb6\xe4\xba\xba\xe5\x91\x98\xe5\x92\x8c\xe5\xb7\xa5\xe7\xa8\x8b\xe5" "\xb8\x88\xe5\x9c\xa8Google\xe6\x9c\xba\xe5\x99\xa8\xe6\x99\xba\xe8\x83\xbd" "\xe7\xa0\x94\xe7\xa9\xb6\xe7\xbb\x84\xe7\xbb\x87\xe7\x9a\x84Google Brain" "\xe5\x9b\xa2\xe9\x98\x9f\xe5\xbc\x80\xe5\x8f\x91\xe7\x9a\x84\xef\xbc\x8c" "\xe7\x9b\xae\xe7\x9a\x84\xe6\x98\xaf\xe8\xbf\x9b\xe8\xa1\x8c\xe6\x9c\xba" "\xe5\x99\xa8\xe5\xad\xa6\xe4\xb9\xa0\xe5\x92\x8c\xe6\xb7\xb1\xe5\xba\xa6" "\xe7\xa5\x9e\xe7\xbb\x8f\xe7\xbd\x91\xe7\xbb\x9c\xe7\xa0\x94\xe7\xa9\xb6" "\xe3\x80\x82", "\xe8\xaf\xa5\xe7\xb3\xbb\xe7\xbb\x9f\xe8\xb6\xb3\xe4\xbb\xa5\xe9\x80\x82" "\xe7\x94\xa8\xe4\xba\x8e\xe5\x90\x84\xe7\xa7\x8d\xe5\x85\xb6\xe4\xbb\x96" "\xe9\xa2\x86\xe5\x9f\x9f\xe4\xb9\x9f\xe6\x98\xaf\xe5\xa6\x82\xe6\xad\xa4" "\xe3\x80\x82", "TensorFlow\xe6\x8f\x90\xe4\xbe\x9b\xe7\xa8\xb3\xe5\xae\x9a\xe7\x9a\x84" "Python API\xe5\x92\x8c C API\xef\xbc\x8c\xe4\xbb\xa5\xe5\x8f\x8a\xe6\xb2" "\xa1\xe6\x9c\x89 API\xe5\x90\x91\xe5\x90\x8e\xe5\x85\xbc\xe5\xae\xb9\xe6" "\x80\xa7\xe4\xbf\x9d\xe8\xaf\x81\xef\xbc\x8c\xe5\xa6\x82 C ++\xef\xbc\x8c" "Go\xef\xbc\x8cJava\xef\xbc\x8cJavaScript\xe5\x92\x8cSwift\xe3\x80\x82", }; const char* const kByteUnit = "BYTE"; const char* const kUTF8Unit = "UTF8_CHAR"; Tensor GetTestTensor(int batch) { const int sz = TF_ARRAYSIZE(ascii_lines); Tensor t(DT_STRING, {batch}); auto s = t.flat<tstring>(); for (int i = 0; i < batch; ++i) { s(i) = ascii_lines[i % sz]; } return t; } Tensor GetTestUTF8Tensor(int batch) { const int sz = TF_ARRAYSIZE(unicode_lines); Tensor t(DT_STRING, {batch}); auto s = t.flat<tstring>(); for (int i = 0; i < batch; ++i) { s(i) = unicode_lines[i % sz]; } return t; } Graph* SetupSubstrGraph(const Tensor& input, const int32_t pos, const int32_t len, const char* const unit) { Graph* g = new Graph(OpRegistry::Global()); Tensor position(DT_INT32, TensorShape({})); position.flat<int32>().setConstant(pos); Tensor length(DT_INT32, TensorShape({})); length.flat<int32>().setConstant(len); 
TF_CHECK_OK(NodeBuilder("substr_op", "Substr") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, position)) .Input(test::graph::Constant(g, length)) .Attr("unit", unit) .Finalize(g, nullptr )); return g; } static void BM_SubstrByte(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestTensor(batch_size); Graph* g = SetupSubstrGraph(input, 3, 30, kByteUnit); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(state.iterations()); } static void BM_SubstrUTF8(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestUTF8Tensor(batch_size); Graph* g = SetupSubstrGraph(input, 3, 30, kUTF8Unit); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(state.iterations()); } BENCHMARK(BM_SubstrByte) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); BENCHMARK(BM_SubstrUTF8) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/substr_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/substr_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
853575ff-8d14-42fe-819a-ae8c76631c2d
cpp
tensorflow/tensorflow
logging_ops
tensorflow/core/ops/logging_ops.cc
tensorflow/core/kernels/logging_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/dataset_stateful_op_allowlist.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; REGISTER_OP("Assert") .Input("condition: bool") .Input("data: T") .SetIsStateful() .Attr("T: list(type)") .Attr("summarize: int = 3") .SetShapeFn(shape_inference::NoOutputs); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Assert"); REGISTER_OP("Print") .Input("input: T") .Input("data: U") .Output("output: T") .SetIsStateful() .Attr("T: type") .Attr("U: list(type) >= 0") .Attr("message: string = ''") .Attr("first_n: int = -1") .Attr("summarize: int = 3") .SetShapeFn(shape_inference::UnchangedShape); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Print"); REGISTER_OP("PrintV2") .Input("input: string") .SetIsStateful() .Attr("output_stream: string = 'stderr'") .Attr("end: string = '\n'") .SetShapeFn([](InferenceContext* c) { if (!c->RankKnown(c->input(0))) return absl::OkStatus(); if (c->Rank(c->input(0)) != 0) { return errors::InvalidArgument("input must be a scalar, but has rank: ", c->Rank(c->input(0))); } return absl::OkStatus(); }); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("PrintV2"); REGISTER_OP("TensorSummaryV2") .Input("tag: string") .Input("tensor: T") .Input("serialized_summary_metadata: string") .Output("summary: string") .Attr("T: type") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("TensorSummary") .Input("tensor: T") .Output("summary: string") .Attr("T: type") .Attr("description: string = ''") .Attr("labels: list(string) = []") .Attr("display_name: string = ''") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("ImageSummary") .Input("tag: string") .Input("tensor: T") .Output("summary: string") .Attr("max_images: int >= 1 = 3") .Attr("T: {uint8, float, half, float64} = DT_FLOAT") .Attr( "bad_color: tensor = { dtype: DT_UINT8 " "tensor_shape: { dim { size: 4 } } " "int_val: 
255 int_val: 0 int_val: 0 int_val: 255 }") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("AudioSummaryV2") .Input("tag: string") .Input("tensor: float") .Input("sample_rate: float") .Output("summary: string") .Attr("max_outputs: int >= 1 = 3") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("AudioSummary") .Input("tag: string") .Input("tensor: float") .Output("summary: string") .Attr("sample_rate: float") .Attr("max_outputs: int >= 1 = 3") .SetShapeFn(shape_inference::ScalarShape) .Deprecated(15, "Use AudioSummaryV2."); REGISTER_OP("Timestamp") .Output("ts: float64") .SetIsStateful() .SetShapeFn(shape_inference::ScalarShape); ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Timestamp"); }
#include <chrono> #include <thread> #include "xla/tsl/util/determinism_test_util.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/status_matchers.h" namespace tensorflow { namespace { class PrintingV2GraphTest : public OpsTestBase { protected: Status Init(const string& output_stream = "log(warning)") { TF_CHECK_OK(NodeDefBuilder("op", "PrintV2") .Input(FakeInput(DT_STRING)) .Attr("output_stream", output_stream) .Finalize(node_def())); return InitOp(); } }; TEST_F(PrintingV2GraphTest, StringSuccess) { TF_ASSERT_OK(Init()); AddInputFromArray<tstring>(TensorShape({}), {"bar"}); TF_ASSERT_OK(RunOpKernel()); } TEST_F(PrintingV2GraphTest, InvalidOutputStream) { ASSERT_NE(absl::OkStatus(), (Init("invalid_output_stream"))); } TEST_F(PrintingV2GraphTest, InvalidInputRank) { TF_ASSERT_OK(Init()); AddInputFromArray<tstring>(TensorShape({2}), {"bar", "foo"}); ASSERT_NE(absl::OkStatus(), RunOpKernel()); } class PrintingGraphTest : public OpsTestBase { protected: Status Init(DataType input_type1, DataType input_type2, string msg = "", int first_n = -1, int summarize = 3) { TF_CHECK_OK(NodeDefBuilder("op", "Print") .Input(FakeInput(input_type1)) .Input(FakeInput(2, input_type2)) .Attr("message", msg) .Attr("first_n", first_n) .Attr("summarize", summarize) .Finalize(node_def())); return InitOp(); } }; TEST_F(PrintingGraphTest, Int32Success_6) { TF_ASSERT_OK(Init(DT_INT32, DT_INT32)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 
4, 5, 6}); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(PrintingGraphTest, Int32Success_Summarize6) { TF_ASSERT_OK(Init(DT_INT32, DT_INT32, "", -1, 6)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(PrintingGraphTest, StringSuccess) { TF_ASSERT_OK(Init(DT_INT32, DT_STRING)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<tstring>(TensorShape({}), {"foo"}); AddInputFromArray<tstring>(TensorShape({}), {"bar"}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(PrintingGraphTest, MsgSuccess) { TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: ")); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<tstring>(TensorShape({}), {"foo"}); AddInputFromArray<tstring>(TensorShape({}), {"bar"}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(PrintingGraphTest, FirstNSuccess) { TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<tstring>(TensorShape({}), {"foo"}); AddInputFromArray<tstring>(TensorShape({}), {"bar"}); for (int i = 0; i < 4; i++) 
TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } class TimestampTest : public OpsTestBase { protected: Status Init() { TF_CHECK_OK(NodeDefBuilder("op", "Timestamp").Finalize(node_def())); return InitOp(); } }; TEST_F(TimestampTest, WaitAtLeast) { TF_ASSERT_OK(Init()); TF_ASSERT_OK(RunOpKernel()); double ts1 = *((*GetOutput(0)).flat<double>().data()); std::this_thread::sleep_for(std::chrono::seconds(1)); TF_ASSERT_OK(RunOpKernel()); double ts2 = *((*GetOutput(0)).flat<double>().data()); EXPECT_LE(1.0, ts2 - ts1); } TEST_F(TimestampTest, DeterminismError) { tsl::test::DeterministicOpsScope det_scope; TF_ASSERT_OK(Init()); EXPECT_THAT(RunOpKernel(), testing::StatusIs( error::FAILED_PRECONDITION, "Timestamp cannot be called when determinism is enabled")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/logging_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/logging_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a631fdfb-c701-42a3-84f9-0c1b71de9f2c
cpp
tensorflow/tensorflow
count_ops
tensorflow/core/ops/count_ops.cc
tensorflow/core/kernels/count_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; using shape_inference::ShapeHandle; Status DenseCountSparseOutputShapeFn(InferenceContext *c) { auto values = c->input(0); auto weights = c->input(1); ShapeHandle output; auto num_weights = c->NumElements(weights); if (c->ValueKnown(num_weights) && c->Value(num_weights) == 0) { output = values; } else { TF_RETURN_IF_ERROR(c->Merge(weights, values, &output)); } auto rank = c->Rank(output); auto nvals = c->UnknownDim(); c->set_output(0, c->Matrix(nvals, rank)); c->set_output(1, c->Vector(nvals)); c->set_output(2, c->Vector(rank)); return absl::OkStatus(); } Status SparseCountSparseOutputShapeFn(InferenceContext *c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused)); auto rank = c->Dim(c->input(0), 1); auto nvals = c->UnknownDim(); c->set_output(0, c->Matrix(nvals, rank)); c->set_output(1, c->Vector(nvals)); c->set_output(2, c->Vector(rank)); return absl::OkStatus(); } Status RaggedCountSparseOutputShapeFn(InferenceContext *c) { int32_t rank = c->Rank(c->input(1)); if (rank != c->kUnknownRank) { ++rank; } auto nvals = c->UnknownDim(); c->set_output(0, c->Matrix(nvals, rank)); c->set_output(1, c->Vector(nvals)); c->set_output(2, c->Vector(rank)); return absl::OkStatus(); } REGISTER_OP("DenseCountSparseOutput") .Input("values: T") .Input("weights: output_type") .Attr("T: {int32, int64}") .Attr("minlength: int >= -1 = -1") .Attr("maxlength: int >= -1 = -1") .Attr("binary_output: bool") .Attr("output_type: {int32, int64, float, double}") .SetShapeFn(DenseCountSparseOutputShapeFn) .Output("output_indices: int64") .Output("output_values: output_type") .Output("output_dense_shape: int64"); REGISTER_OP("SparseCountSparseOutput") .Input("indices: int64") .Input("values: T") .Input("dense_shape: int64") .Input("weights: output_type") 
.Attr("T: {int32, int64}") .Attr("minlength: int >= -1 = -1") .Attr("maxlength: int >= -1 = -1") .Attr("binary_output: bool") .Attr("output_type: {int32, int64, float, double}") .SetShapeFn(SparseCountSparseOutputShapeFn) .Output("output_indices: int64") .Output("output_values: output_type") .Output("output_dense_shape: int64"); REGISTER_OP("RaggedCountSparseOutput") .Input("splits: int64") .Input("values: T") .Input("weights: output_type") .Attr("T: {int32, int64}") .Attr("minlength: int >= -1 = -1") .Attr("maxlength: int >= -1 = -1") .Attr("binary_output: bool") .Attr("output_type: {int32, int64, float, double}") .SetShapeFn(RaggedCountSparseOutputShapeFn) .Output("output_indices: int64") .Output("output_values: output_type") .Output("output_dense_shape: int64"); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { TEST_F(OpsTestBase, DenseCountSparseOutputShapeFn) { ShapeInferenceTestOp op("DenseCountSparseOutput"); INFER_OK(op, "[?];?", "[?,1];[?];[1]"); INFER_OK(op, "[?,?];?", "[?,2];[?];[2]"); } TEST_F(OpsTestBase, SparseCountSparseOutputShapeFn) { ShapeInferenceTestOp op("SparseCountSparseOutput"); INFER_OK(op, "[?,1];?;?;?", "[?,d0_1];[?];[d0_1]"); INFER_OK(op, "[?,2];?;?;?", "[?,d0_1];[?];[d0_1]"); } TEST_F(OpsTestBase, RaggedCountSparseOutputShapeFn) { ShapeInferenceTestOp op("RaggedCountSparseOutput"); INFER_OK(op, "?;[?];?", "[?,2];[?];[2]"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/count_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/count_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b174aca1-29c4-4460-986b-0277ad8a2da7
cpp
tensorflow/tensorflow
dequantize_op
tensorflow/compiler/tf2xla/kernels/dequantize_op.cc
tensorflow/core/kernels/dequantize_op_test.cc
#include <array> #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/matrix.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.pb.h" namespace tensorflow { namespace { constexpr std::array<DataType, 2> kQuantizedType = {{DT_QINT8, DT_QUINT8}}; template <typename T> float get_fullrange() { return static_cast<float>(std::numeric_limits<T>::max()) - std::numeric_limits<T>::min(); } class DequantizeOp : public XlaOpKernel { public: explicit DequantizeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { string mode_string; int axis; bool narrow_range; OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string)); OP_REQUIRES( ctx, (mode_string == "MIN_COMBINED"), errors::InvalidArgument("Mode string must be 'MIN_COMBINED' is " + mode_string + "'")); OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range)); OP_REQUIRES(ctx, narrow_range == false, errors::InvalidArgument("narrow_range must be false")); OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis)); OP_REQUIRES(ctx, axis == -1, errors::InvalidArgument("axis must be -1' is ", axis)); OP_REQUIRES_OK(ctx, ctx->GetAttr("dtype", &dtype_)); } ~DequantizeOp() override = default; void Compile(XlaOpKernelContext* ctx) override { DataType input_type = ctx->input_type(0); xla::XlaOp input = ctx->Input(0); xla::XlaOp output = xla::ConvertElementType(input, xla::F32); xla::XlaOp min_range = xla::ConvertElementType(ctx->Input(1), xla::F32); xla::XlaOp max_range = xla::ConvertElementType(ctx->Input(2), xla::F32); xla::XlaOp full_range; xla::XlaOp half_range; if (input_type == DT_QINT8) { full_range = ScalarLike(output, get_fullrange<qint8>()); half_range = (full_range + ScalarLike(output, 1.0f)) / ScalarLike(output, 
2.0f); } else { OP_REQUIRES(ctx, input_type == DT_QUINT8, errors::InvalidArgument( "Only support DT_QINT8 or DT_QUINT8, got ", input_type)); full_range = ScalarLike(output, get_fullrange<quint8>()); half_range = ScalarLike(output, 0.0f); } xla::XlaOp scale = (max_range - min_range) / full_range; output = xla::Add(xla::Mul(xla::Add(output, half_range), scale), min_range); if (dtype_ == DT_BFLOAT16) { output = xla::ConvertElementType(output, xla::BF16); } ctx->SetOutput(0, output); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("Dequantize").TypeConstraint("T", kQuantizedType), DequantizeOp); } }
#include <functional> #include <memory> #include <random> #include <vector> #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class DequantizeOpTest : public OpsTestBase { protected: template <typename T> void ComputeDequantizeMinCombinedUsingEigen(const Tensor& input, float min_range, float max_range, Tensor* output) { float half_range = !std::is_signed<T>::value ? 
0.0f : (static_cast<float>(std::numeric_limits<T>::max()) - std::numeric_limits<T>::min() + 1) / 2.0f; const float scale_factor = (max_range - min_range) / (static_cast<float>(std::numeric_limits<T>::max()) - std::numeric_limits<T>::min()); output->flat<float>() = ((input.flat<T>().template cast<int>().template cast<float>() + half_range) * scale_factor) + min_range; } template <typename T> void RunDequantizeMinCombinedTest(float min_range, float max_range, const string& op_name) { TF_ASSERT_OK(NodeDefBuilder("dequantize_op", op_name) .Input(FakeInput(DataTypeToEnum<T>::v())) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<T>::v()) .Attr("mode", "MIN_COMBINED") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); std::vector<T> input; for (int64_t i = std::numeric_limits<T>::min(); i < std::numeric_limits<T>::max(); ++i) { input.push_back(static_cast<T>(i)); } TensorShape shape({static_cast<int64_t>(input.size())}); AddInputFromArray<T>(shape, input); AddInputFromArray<float>(TensorShape({}), {min_range}); AddInputFromArray<float>(TensorShape({}), {max_range}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, shape); ComputeDequantizeMinCombinedUsingEigen<T>(GetInput(0), min_range, max_range, &expected); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } template <typename T> void RunDequantizeBfloat16MinCombinedTest(float min_range, float max_range) { TF_ASSERT_OK(NodeDefBuilder("dequantize_op_bfloat16", "Dequantize") .Input(FakeInput(DataTypeToEnum<T>::v())) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<T>::v()) .Attr("mode", "MIN_COMBINED") .Attr("dtype", DT_BFLOAT16) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); std::vector<T> input; for (int64_t i = std::numeric_limits<T>::min(); i < std::numeric_limits<T>::max(); ++i) { input.push_back(static_cast<T>(i)); } TensorShape shape({static_cast<int64_t>(input.size())}); AddInputFromArray<T>(shape, input); 
AddInputFromArray<float>(TensorShape({}), {min_range}); AddInputFromArray<float>(TensorShape({}), {max_range}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_float32(allocator(), DT_FLOAT, shape); ComputeDequantizeMinCombinedUsingEigen<T>(GetInput(0), min_range, max_range, &expected_float32); Tensor expected(allocator(), DT_BFLOAT16, shape); expected.flat<bfloat16>() = expected_float32.flat<float>().cast<bfloat16>(); test::ExpectTensorEqual<bfloat16>(expected, *GetOutput(0)); } template <typename T> std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis, const std::vector<T>& data) { uint32 seed = 123; std::minstd_rand rng(seed); int64_t out_size = 1; for (int dim : dims) { out_size *= dim; } int minor_size = 1; for (int i = axis + 1; i < dims.size(); ++i) { minor_size *= dims[i]; } std::vector<T> out(out_size); int num_slices = (axis == -1) ? 1 : dims[axis]; for (int out_idx = 0; out_idx < out_size; ++out_idx) { int in_idx = rng() % data.size(); T multiplier = ((out_idx / minor_size) % num_slices) + 1; out[out_idx] = data[in_idx] * multiplier; } return out; } template <typename T> void RunDequantizeScaledTest(float min_range, float max_range, int axis, const std::vector<T>& values, const std::vector<float>& expected) { const std::vector<int64_t> dims = {2, 3, 4, 5}; int num_slices = (axis == -1) ? 
1 : dims[axis]; TF_ASSERT_OK(NodeDefBuilder("dequantize_op", "Dequantize") .Input(FakeInput(DataTypeToEnum<T>::v())) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<T>::v()) .Attr("mode", "SCALED") .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<T>(TensorShape(dims), ScalePerSliceAlongAxis(dims, -1, values)); std::vector<float> min_ranges(num_slices), max_ranges(num_slices); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { min_ranges[slice_idx] = (slice_idx + 1) * min_range; max_ranges[slice_idx] = (slice_idx + 1) * max_range; } AddInputFromArray<float>(TensorShape({num_slices}), min_ranges); AddInputFromArray<float>(TensorShape({num_slices}), max_ranges); TF_ASSERT_OK(RunOpKernel()); Tensor expected_tensor(allocator(), DT_FLOAT, TensorShape(dims)); test::FillValues<float>(&expected_tensor, ScalePerSliceAlongAxis(dims, axis, expected)); test::ExpectClose(expected_tensor, *GetOutput(0)); } }; struct ParameterizedDequantizeOpTest : public OpsTestBase, public ::testing::WithParamInterface<int> {}; TEST_F(DequantizeOpTest, DequantizeMinCombinedQuint8) { RunDequantizeMinCombinedTest<quint8>(0, 255.0f, "Dequantize"); } TEST_F(DequantizeOpTest, DequantizeMinCombinedQint8) { RunDequantizeMinCombinedTest<qint8>(0, 255.0f, "Dequantize"); } TEST_F(DequantizeOpTest, DequantizeMinCombinedQint16) { RunDequantizeMinCombinedTest<qint16>(0, 255.0f, "Dequantize"); } TEST_F(DequantizeOpTest, DequantizeMinCombinedQuint16) { RunDequantizeMinCombinedTest<quint16>(0, 255.0f, "Dequantize"); } TEST_F(DequantizeOpTest, DequantizeBfloat16MinCombinedQuint8) { RunDequantizeBfloat16MinCombinedTest<quint8>(0, 255.0f); } TEST_F(DequantizeOpTest, DequantizeBfloat16MinCombinedQint8) { RunDequantizeBfloat16MinCombinedTest<qint8>(0, 255.0f); } TEST_F(DequantizeOpTest, DequantizeBfloat16MinCombinedQint16) { RunDequantizeBfloat16MinCombinedTest<qint16>(0, 255.0f); } TEST_F(DequantizeOpTest, 
DequantizeBfloat16MinCombinedQuint16) { RunDequantizeBfloat16MinCombinedTest<quint16>(0, 255.0f); } TEST_F(DequantizeOpTest, DequantizeScaledQuint8Zero) { RunDequantizeScaledTest<quint8>(-255.0f, 127.0f, -1, {0}, {0.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQuint8CheckIgnoresNegative) { RunDequantizeScaledTest<quint8>(-512.0f, 255.0f, -1, {255}, {255.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQuint8ScaleDown) { RunDequantizeScaledTest<quint8>(-1.0f, 2.0f, -1, {255}, {2.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQuint8ScaleUp) { RunDequantizeScaledTest<quint8>(200.0f, 400.0f, -1, {255}, {400.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8Zero) { RunDequantizeScaledTest<qint8>(-255.0f, 127.0f, -1, {0}, {0.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8ScaleIdentity) { RunDequantizeScaledTest<qint8>(-10.0f, 127.0f, -1, {-127}, {-127.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8ScaleDown) { RunDequantizeScaledTest<qint8>(-2.0f, 1.0f, -1, {-128}, {-2.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8ScaleUp) { RunDequantizeScaledTest<qint8>(-1.0f, 300.0f, -1, {42}, {99.212601}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8Axis1) { RunDequantizeScaledTest<qint8>(-12.8f, 12.7f, 1, {-20, -10, 0, 1, 10, 20}, {-2.0, -1.0, 0.0, 0.1, 1.0, 2.0}); } TEST_F(DequantizeOpTest, DequantizeScaledQint8Axis3) { RunDequantizeScaledTest<qint8>(-12.8f, 12.7f, 3, {-20, -10, 0, 1, 10, 20}, {-2.0, -1.0, 0.0, 0.1, 1.0, 2.0}); } template <typename T> static void BM_DequantizeMinCombinedCpu(::testing::benchmark::State& state) { auto root = Scope::NewRootScope().ExitOnError(); const int64_t num_values = 1500 * 250; std::vector<T> inputs; inputs.reserve(num_values); for (int i = 0; i < num_values; ++i) inputs.push_back(i); ops::Dequantize give_me_a_name(root, test::AsTensor<T>(inputs), test::AsScalar<float>(-1.5f), test::AsScalar<float>(20.5f), ops::Dequantize::Attrs().Mode("MIN_COMBINED")); TF_CHECK_OK(root.status()); Graph* g = new 
Graph(OpRegistry::Global()); TF_CHECK_OK(root.ToGraph(g)); test::Benchmark("cpu", g, false).Run(state); state.SetBytesProcessed(state.iterations() * num_values * (sizeof(float) + sizeof(T))); state.SetItemsProcessed(state.iterations()); } void BM_DequantizeMinCombinedCpuQuint16(::testing::benchmark::State& state) { BM_DequantizeMinCombinedCpu<quint16>(state); } void BM_DequantizeMinCombinedCpuQint16(::testing::benchmark::State& state) { BM_DequantizeMinCombinedCpu<qint16>(state); } void BM_DequantizeMinCombinedCpuQuint8(::testing::benchmark::State& state) { BM_DequantizeMinCombinedCpu<quint8>(state); } void BM_DequantizeMinCombinedCpuQint8(::testing::benchmark::State& state) { BM_DequantizeMinCombinedCpu<qint8>(state); } BENCHMARK(BM_DequantizeMinCombinedCpuQuint16); BENCHMARK(BM_DequantizeMinCombinedCpuQint16); BENCHMARK(BM_DequantizeMinCombinedCpuQuint8); BENCHMARK(BM_DequantizeMinCombinedCpuQint8); template <typename T> static void BM_DequantizeBfloat16MinCombinedCpu( ::testing::benchmark::State& state) { auto root = Scope::NewRootScope().ExitOnError(); const int64_t num_values = 1500 * 250; std::vector<T> inputs; inputs.reserve(num_values); for (int i = 0; i < num_values; ++i) inputs.push_back(i); ops::Dequantize give_me_a_name(root, test::AsTensor<T>(inputs), test::AsScalar<float>(-1.5f), test::AsScalar<float>(20.5f), ops::Dequantize::Attrs().Dtype(DT_BFLOAT16)); TF_CHECK_OK(root.status()); Graph* g = new Graph(OpRegistry::Global()); TF_CHECK_OK(root.ToGraph(g)); test::Benchmark("cpu", g, false).Run(state); state.SetBytesProcessed(state.iterations() * num_values * (sizeof(bfloat16) + sizeof(T))); state.SetItemsProcessed(state.iterations()); } void BM_DequantizeBfloat16MinCombinedCpuQuint16( ::testing::benchmark::State& state) { BM_DequantizeBfloat16MinCombinedCpu<quint16>(state); } void BM_DequantizeBfloat16MinCombinedCpuQint16( ::testing::benchmark::State& state) { BM_DequantizeBfloat16MinCombinedCpu<qint16>(state); } void 
BM_DequantizeBfloat16MinCombinedCpuQuint8( ::testing::benchmark::State& state) { BM_DequantizeBfloat16MinCombinedCpu<quint8>(state); } void BM_DequantizeBfloat16MinCombinedCpuQint8( ::testing::benchmark::State& state) { BM_DequantizeBfloat16MinCombinedCpu<qint8>(state); } BENCHMARK(BM_DequantizeBfloat16MinCombinedCpuQuint16); BENCHMARK(BM_DequantizeBfloat16MinCombinedCpuQint16); BENCHMARK(BM_DequantizeBfloat16MinCombinedCpuQuint8); BENCHMARK(BM_DequantizeBfloat16MinCombinedCpuQint8); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/dequantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/dequantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
697d857b-d4cc-4224-b49c-eef394c1a04a
cpp
tensorflow/tensorflow
scoped_allocator_ops
tensorflow/core/ops/scoped_allocator_ops.cc
tensorflow/core/kernels/scoped_allocator_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" namespace tensorflow { REGISTER_OP("_ScopedAllocator") .Output("output: T") .Attr("shapes: list(shape)") .Attr("shape: shape") .Attr("T: type") .Attr("sa_name: string") .Attr("id: int") .Attr("expected_call_count: int") .SetIsStateful() .SetShapeFn(shape_inference::ExplicitShape) .Doc(R"doc( Allocates a mutable tensor that becomes available to appropriately annotated downstream Ops as backing store for their output tensor allocations via the ScopedAllocatorMgr. Returns a reference to this value. This is an experimental op for internal use only. It is possible to use this op in unsafe ways. 'shapes' is a list of the shapes of the tensors that are to be allocated by this ScopedAllocator. 'shape' is the shape of the output of this Op, i.e. the 1D backing tensor from which the individual allocated tensors are aliased. 'sa_name' is the name assigned to the Node, for connectivity specification and debugging. 'id' is a non-negative integer 'scope_id' handled by the ScopedAllocatorMgr. 'expected_call_count' is the number of individual tensors expected to be allocated from the backing tensor. )doc"); REGISTER_OP("_ScopedAllocatorConcat") .Output("output: T") .Input("backing: T") .Input("inputs: N * T") .Attr("shape: shape") .Attr("T: type") .Attr("reshape: bool = false") .Attr("sa_name: string") .Attr("id: int") .Attr("N: int >= 2") .SetIsStateful() .SetShapeFn(shape_inference::ExplicitShape) .Doc(R"doc( Acts like a Concat Op that merges multiple tensors into one, however it must only be used in conjunction with a ScopedAllocator which is backing the memory of all of its input tensors so that actually it just outputs a read-only reference to that ScopedAllocator's backing tensor. This is an experimental op for internal use only. It is possible to use this op in unsafe ways. 'backing' is the backing tensor, i.e. the output of an upstream ScopedAllocator. 
'inputs' is a list of nominal input tensors, all of which must be aliases to regions of the backing tensor. These will be outputs of upstream nodes that allocate their outputs from the same ScopedAllocator. 'shape' is the shape of the output, which will usually be the same shape as the input backing tensor. 'reshape' is true iff the output shape is to be different from that of the input backing tensor. 'sa_name' is the Node name of the upstream ScopedAllocator. 'id' is the scope_id identifying the upstream ScopedAllocator. 'N' is the number of nominal inputs to be concatenated. )doc"); REGISTER_OP("_ScopedAllocatorSplit") .Output("output: N * T") .Input("concat: T") .Input("split: N * T") .Attr("T: type") .Attr("sa_name: string") .Attr("id: int") .Attr("N: int >= 2") .Attr("shapes: list(shape)") .SetIsStateful() .SetShapeFn(shape_inference::ExplicitShapes) .Doc(R"doc( Acts roughly like a SplitV Op that splits one tensor into multiple tensors but must only be used in conjunction with corresponding ScopedAllocator and ScopedAllocatorConcat instances. In practice it is provided as inputs the backing tensor as first input, which contains the concatenated values, and a list of alias tensors as its other input and it simply outputs that second list. This is an experimental op for internal use only. It is possible to use this op in unsafe ways. 'concat' is the single output produced by an upstream ScopedAllocatorConcat node. This is actually the backing tensor from a ScopedAllocator node upstream of the ScopedAllocatorConcat. 'split' is a list of tensors aliased from the backing tensor. It will become the output of this ScopedAllocatorSplit node. 'type' is the common DataType of all of the input and output tensors. 'sa_name' is the Node name of the upstream ScopedAllocator. 'id' is the scope_id identifying the upstream ScopedAllocator. 'N' is the number of split tensors. 'shapes' is a list of the split tensor shapes. )doc"); }
#include <vector> #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/common_runtime/scoped_allocator.h" #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { class ScopedAllocatorOpTest : public OpsTestBase { protected: void MakeOp(const TensorShape& shape, const absl::Span<const TensorShape> shapes, DataType dtype, const string& name, int32_t id, int32_t expected_call_count) { TF_EXPECT_OK(NodeDefBuilder("scoped_allocator_op", "_ScopedAllocator") .Attr("T", dtype) .Attr("shape", shape) .Attr("shapes", shapes) .Attr("sa_name", name) .Attr("id", id) .Attr("expected_call_count", expected_call_count) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); TF_ASSERT_OK(RunOpKernel()); AllocatorAttributes attr; Allocator* allocator; for (size_t i = 0; i < shapes.size(); i++) { attr.scope_id = id + i + 1; allocator = device_->GetScopedAllocator(attr, context_->step_id()); Tensor temp(allocator, dtype, shapes[i]); } } }; TEST_F(ScopedAllocatorOpTest, Simple) { MakeOp(TensorShape({8}), {TensorShape({8})}, DT_FLOAT, "test", 120, 1); MakeOp(TensorShape({1024}), {TensorShape({32, 32})}, DT_DOUBLE, "test1", 130, 1); MakeOp(TensorShape({204}), {TensorShape({64}), TensorShape({3, 3}), 
TensorShape({5, 5, 5})}, DT_HALF, "test2", 140, 3); MakeOp(TensorShape({1024}), {TensorShape({512}), TensorShape({64, 8})}, DT_UINT32, "test3", 150, 2); } void PrepOp(DataType dtype, int32_t id, const std::vector<TensorShape>& fields_shapes, std::vector<ScopedAllocator::Field>* fields, Tensor** backing_tensor, Allocator* allocator, ScopedAllocatorMgr* sam, const string& op_name, std::vector<Tensor>* tensors, absl::InlinedVector<TensorValue, 4>* inputs, const DataTypeVector& input_types) { ScopedAllocatorMgr::PopulateFields(id, fields_shapes, dtype, fields); size_t num_bytes = fields->back().offset + fields->back().bytes_allocated; int32_t num_elements = num_bytes / DataTypeSize(dtype); CHECK_EQ(num_bytes % DataTypeSize(dtype), 0); *backing_tensor = new Tensor(allocator, dtype, {num_elements}); int64_t step_id = 10; Status s = sam->AddScopedAllocator(**backing_tensor, step_id, id, "sa_" + op_name + "_test", *fields, fields_shapes.size()); TF_ASSERT_OK(s); ScopedAllocatorContainer* sac = sam->GetContainer(step_id); std::vector<ScopedAllocatorInstance*> sa_instances(fields_shapes.size(), nullptr); for (size_t i = 0; i < fields_shapes.size(); i++) { sa_instances[i] = sac->GetInstance(id + i + 1); tensors->push_back(Tensor(sa_instances[i], dtype, fields_shapes[i])); } inputs->reserve(1 + tensors->size()); CHECK_GT(input_types.size(), inputs->size()); CHECK_EQ(input_types[inputs->size()], dtype); inputs->push_back({nullptr, *backing_tensor}); for (size_t i = 0; i < tensors->size(); i++) { CHECK_EQ(input_types[inputs->size()], dtype); inputs->push_back({nullptr, &((*tensors)[i])}); } } class ScopedAllocatorConcatOpTest : public OpsTestBase { protected: void BuildNodeDef(const TensorShape& shape, DataType dtype, const string& name, int32_t id, int32_t num_tensors) { TF_EXPECT_OK( NodeDefBuilder("scoped_allocator_concat_op", "_ScopedAllocatorConcat") .Attr("shape", shape) .Attr("T", dtype) .Attr("N", num_tensors) .Attr("sa_name", name) .Attr("id", id) 
.Input(FakeInput(dtype)) .Input(FakeInput(num_tensors, dtype)) .Finalize(node_def())); shape_ = shape; reshape_ = false; } void BuildNodeDefWithReshape(const TensorShape& shape, DataType dtype, bool reshape, const string& name, int32_t id, int32_t num_tensors) { TF_EXPECT_OK( NodeDefBuilder("scoped_allocator_concat_op", "_ScopedAllocatorConcat") .Attr("shape", shape) .Attr("T", dtype) .Attr("reshape", reshape) .Attr("N", num_tensors) .Attr("sa_name", name) .Attr("id", id) .Input(FakeInput(dtype)) .Input(FakeInput(num_tensors, dtype)) .Finalize(node_def())); shape_ = shape; reshape_ = reshape; } void MakeOp(const TensorShape& shape, DataType dtype, bool reshape, const string& name, int32_t id, int32_t num_tensors) { BuildNodeDefWithReshape(shape, dtype, reshape, name, id, num_tensors); TF_EXPECT_OK(InitOp()); } void ExecOp(DataType dtype, int32_t id, const std::vector<TensorShape>& fields_shapes) { Tensor* backing_tensor = nullptr; std::vector<Tensor> tensors; std::vector<ScopedAllocator::Field> fields; PrepOp(dtype, id, fields_shapes, &fields, &backing_tensor, allocator(), device_->GetScopedAllocatorMgr(), "concat", &tensors, &inputs_, input_types_); TF_ASSERT_OK(RunOpKernel()); const Tensor& input = context_->input(0); OpOutputList output_list; Status s = context_->output_list("output", &output_list); TF_ASSERT_OK(s); const Tensor& output = *(output_list[0]); CHECK_EQ(DMAHelper::base(&input), DMAHelper::base(&output)); CHECK_EQ(input.dtype(), output.dtype()); CHECK_EQ(input.NumElements(), output.NumElements()); if (reshape_) { CHECK_EQ(shape_, output.shape()); } else { TensorShape expected_shape({input.NumElements()}); CHECK_EQ(expected_shape, output.shape()); } delete backing_tensor; } private: TensorShape shape_; bool reshape_; }; TEST_F(ScopedAllocatorConcatOpTest, Success1) { MakeOp({32}, DT_FLOAT, false, "test", 120, 2); ExecOp(DT_FLOAT, 120, {{16}, {16}}); } TEST_F(ScopedAllocatorConcatOpTest, Success2) { MakeOp({2, 2, 2}, DT_DOUBLE, false, "test", 120, 2); 
ExecOp(DT_DOUBLE, 120, {{2, 2}, {2, 2}}); } TEST_F(ScopedAllocatorConcatOpTest, Success3) { MakeOp({3, 3, 3}, DT_HALF, false, "test", 120, 3); ExecOp(DT_HALF, 120, {{3, 3}, {3, 3}, {3, 3}}); } TEST_F(ScopedAllocatorConcatOpTest, Reshape) { MakeOp({2, 2, 4}, DT_DOUBLE, true, "test", 120, 2); ExecOp(DT_DOUBLE, 120, {{2, 4}, {2, 4}}); } TEST_F(ScopedAllocatorConcatOpTest, NoReshapeAttr) { BuildNodeDef({3, 4, 4}, DT_HALF, "test", 120, 3); TF_EXPECT_OK(InitOp()); ExecOp(DT_HALF, 120, {{4, 4}, {4, 4}, {4, 4}}); } TEST_F(ScopedAllocatorConcatOpTest, FailDtypeCheck) { MakeOp({8}, DT_FLOAT, false, "test", 120, 2); EXPECT_DEATH(ExecOp(DT_DOUBLE, 120, {{4}, {4}}), ""); } TEST_F(ScopedAllocatorConcatOpTest, FailNumElementsCheck) { MakeOp({32}, DT_FLOAT, false, "test", 120, 2); AddInputFromArray<float>({8}, {0, 1, 2, 3, 4, 5, 6, 7}); AddInputFromArray<float>({4}, {0, 1, 2, 3}); AddInputFromArray<float>({4}, {4, 5, 6, 7}); Status s = RunOpKernel(); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); } TEST_F(ScopedAllocatorConcatOpTest, FailBounds) { MakeOp({8}, DT_DOUBLE, false, "test", 120, 2); AddInputFromArray<double>({8}, {0, 1, 2, 3, 4, 5, 6, 7}); AddInputFromArray<double>({4}, {0, 1, 2, 3}); AddInputFromArray<double>({4}, {4, 5, 6, 7}); Status s = RunOpKernel(); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); } class ScopedAllocatorSplitOpTest : public OpsTestBase { protected: void BuildNodeDef(const TensorShape& in_shape, DataType dtype, const string& name, int32_t id, int32_t num_tensors, const std::vector<TensorShape>& out_shapes) { TF_EXPECT_OK( NodeDefBuilder("scoped_allocator_split_op", "_ScopedAllocatorSplit") .Attr("T", dtype) .Attr("N", num_tensors) .Attr("sa_name", name) .Attr("id", id) .Attr("shapes", out_shapes) .Input(FakeInput(dtype)) .Input( FakeInput(num_tensors, dtype)) .Finalize(node_def())); } void MakeOp(const TensorShape& in_shape, DataType dtype, const string& name, int32_t id, int32_t num_tensors, const std::vector<TensorShape>& out_shapes) { 
BuildNodeDef(in_shape, dtype, name, id, num_tensors, out_shapes); TF_EXPECT_OK(InitOp()); } void ExecOp(DataType dtype, int32_t id, const std::vector<TensorShape>& fields_shapes) { Tensor* backing_tensor = nullptr; std::vector<Tensor> tensors; std::vector<ScopedAllocator::Field> fields; PrepOp(dtype, id, fields_shapes, &fields, &backing_tensor, allocator(), device_->GetScopedAllocatorMgr(), "split", &tensors, &inputs_, input_types_); TF_ASSERT_OK(RunOpKernel()); const Tensor& input = context_->input(0); const void* lower_limit = DMAHelper::base(&input); const char* lower_limit_c = static_cast<const char*>(lower_limit); OpOutputList output_list; Status s = context_->output_list("output", &output_list); TF_ASSERT_OK(s); for (int i = 0; i < output_list.size(); i++) { const Tensor& output = *(output_list[i]); const void* expected_base = static_cast<const void*>(lower_limit_c + fields[i].offset); CHECK_EQ(output.dtype(), input.dtype()); CHECK_EQ(expected_base, DMAHelper::base(&output)); CHECK_EQ(output.NumElements(), fields_shapes[i].num_elements()); } delete backing_tensor; } }; TEST_F(ScopedAllocatorSplitOpTest, Success1) { MakeOp({32}, DT_FLOAT, "test", 120, 2, {{16}, {16}}); ExecOp(DT_FLOAT, 120, {{16}, {16}}); } TEST_F(ScopedAllocatorSplitOpTest, Success2) { MakeOp({2, 2, 2}, DT_DOUBLE, "test", 120, 2, {{2, 2}, {2, 2}}); ExecOp(DT_DOUBLE, 120, {{2, 2}, {2, 2}}); } TEST_F(ScopedAllocatorSplitOpTest, Success3) { MakeOp({3, 3, 3}, DT_HALF, "test", 120, 3, {{3, 3}, {3, 3}, {3, 3}}); ExecOp(DT_HALF, 120, {{3, 3}, {3, 3}, {3, 3}}); } TEST_F(ScopedAllocatorSplitOpTest, FailNLessThan2) { BuildNodeDef({4, 4}, DT_FLOAT, "test", 120, 1, {{4, 4}}); Status s = InitOp(); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); } TEST_F(ScopedAllocatorSplitOpTest, FailDtypeCheck) { MakeOp({8}, DT_FLOAT, "test", 120, 2, {{4}, {4}}); EXPECT_DEATH(ExecOp(DT_HALF, 120, {{4}, {4}}), ""); } TEST_F(ScopedAllocatorSplitOpTest, FailBounds) { MakeOp({8}, DT_DOUBLE, "test", 120, 2, {{4}, {4}}); 
AddInputFromArray<double>({8}, {0, 1, 2, 3, 4, 5, 6, 7}); AddInputFromArray<double>({4}, {0, 1, 2, 3}); AddInputFromArray<double>({4}, {4, 5, 6, 7}); Status s = RunOpKernel(); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/scoped_allocator_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scoped_allocator_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bbbc2413-3f9b-4c5e-b2d4-176041d08ebe
cpp
tensorflow/tensorflow
parameterized_truncated_normal_op
tensorflow/core/kernels/parameterized_truncated_normal_op.cc
tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/parameterized_truncated_normal_op.h" #include <algorithm> #include <cmath> #include <memory> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/stateless_random_ops.h" #include "tensorflow/core/lib/random/random_distributions.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/guarded_philox_random.h" #include "tensorflow/core/util/work_sharder.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { using random::PhiloxRandom; static constexpr int kMaxIterations = 1000; template <typename T> struct TruncatedNormalFunctor<CPUDevice, T> { void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches, int64_t samples_per_batch, int64_t num_elements, typename TTypes<T>::ConstFlat means, typename TTypes<T>::ConstFlat stddevs, typename TTypes<T>::ConstFlat minvals, typename TTypes<T>::ConstFlat maxvals, const random::PhiloxRandom& gen, typename TTypes<T>::Flat output) { const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3); auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto do_work = [samples_per_batch, num_elements, &ctx, &means, &stddevs, &minvals, &maxvals, &gen, &output, kStdDevsInsideBoundsToUseRandnSampler]( int64_t start_batch, int64_t limit_batch) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(start_batch * 2 * kMaxIterations * (samples_per_batch + 3) / 4); using Uniform = random::UniformDistribution<random::PhiloxRandom, T>; Uniform dist; using Normal = random::NormalDistribution<random::PhiloxRandom, T>; Normal normal_dist; Eigen::array<T, 4> z; Eigen::array<T, 4> g; for (int64_t b = start_batch; b < limit_batch; ++b) { T mean 
= means((means.dimension(0) == 1) ? 0 : b); T stddev = stddevs((stddevs.dimension(0) == 1) ? 0 : b); T minval = minvals((minvals.dimension(0) == 1) ? 0 : b); T maxval = maxvals((maxvals.dimension(0) == 1) ? 0 : b); const int64_t limit_sample = std::min((b + 1) * samples_per_batch, num_elements); int64_t sample = b * samples_per_batch; OP_REQUIRES(ctx, stddev > T(0) && minval < maxval && (Eigen::numext::isfinite(minval) || Eigen::numext::isfinite(maxval)), errors::InvalidArgument("Invalid parameters")); int num_iterations = 0; if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) { std::swap(minval, maxval); stddev = -stddev; } const T normMin = (minval - mean) / stddev; const T normMax = (maxval - mean) / stddev; const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4)); const T cutoff = T(2) * Eigen::numext::exp(T(0.5) + (normMin * (normMin - sqrtFactor)) / T(4)) / (normMin + sqrtFactor); const T diff = normMax - normMin; if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) && (normMax >= T(0.))) || ((normMax > kStdDevsInsideBoundsToUseRandnSampler) && (normMin <= T(0.)))) { while (sample < limit_sample) { const auto randn_sample = normal_dist(&gen_copy); const int size = randn_sample.size(); for (int i = 0; i < size; i++) { if ((randn_sample[i] >= normMin) && (randn_sample[i] <= normMax)) { output(sample) = randn_sample[i] * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; if (num_iterations > kMaxIterations) { LOG(ERROR) << "TruncatedNormal randn rejection sampler " << "exceeded maximum iterations for " << "normMin=" << normMin << " normMax=" << normMax << " kMaxIterations=" << kMaxIterations; ctx->SetStatus(errors::Internal( "TruncatedNormal randn rejection sampler failed to accept" " a sample.")); return; } } } } } else if (diff < cutoff) { const T plusFactor = (normMin < T(0)) ? 
T(0) : normMin * normMin; while (sample < limit_sample) { const auto rand = dist(&gen_copy); const int size = rand.size(); for (int i = 0; i < size; i++) { z[i] = rand[i] * diff + normMin; } for (int i = 0; i < size; i++) { g[i] = (plusFactor - z[i] * z[i]) / T(2.0); } const auto u = dist(&gen_copy); for (int i = 0; i < size; i++) { auto accept = u[i] <= Eigen::numext::exp(g[i]); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal uniform rejection sampler " << "exceeded max iterations. Sample may contain " << "outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal uniform rejection sampler failed to " " accept a sample.")); return; } output(sample) = z[i] * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; } } } } else { const T alpha = (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) / T(2); while (sample < limit_sample) { auto rand = dist(&gen_copy); const int size = rand.size(); int i = 0; while (i < size) { const T z = -Eigen::numext::log(rand[i]) / alpha + normMin; i++; const T x = normMin < alpha ? alpha - z : normMin - alpha; const T g = Eigen::numext::exp(-x * x / T(2.0)); const T u = rand[i]; i++; auto accept = (u <= g && z < normMax); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal exponential distribution " << "rejection sampler exceeds max iterations. 
" << "Sample may contain outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal exponential distribution rejection" " sampler failed to accept a sample.")); return; } output(sample) = z * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; } } } } } }; const int64_t batchInitCost = (Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>()) * 2 + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() + Eigen::internal::functor_traits< Eigen::internal::scalar_sqrt_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() * 4 + Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::AddCost<T>(); const int64_t uniformSampleCost = random::PhiloxRandom::kElementCost + random::UniformDistribution<random::PhiloxRandom, T>::kElementCost; const int64_t uniformRejectionSamplingCost = uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() * 2 + Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost + Eigen::internal::functor_traits< Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>(); const int64_t batchCost = batchInitCost + uniformRejectionSamplingCost * 2 * samples_per_batch; Shard(worker_threads.num_threads, worker_threads.workers, num_batches, batchCost, do_work); } }; template <typename T> struct TruncatedNormalFunctorV2<CPUDevice, T> { void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches, int64_t samples_per_batch, int64_t num_elements, const BCastList<4>& bcast, typename TTypes<T>::ConstFlat means, typename TTypes<T>::ConstFlat stddevs, typename TTypes<T>::ConstFlat minvals, typename TTypes<T>::ConstFlat maxvals, const random::PhiloxRandom& gen, typename TTypes<T>::Flat output) { const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3); auto worker_threads = 
*(ctx->device()->tensorflow_cpu_worker_threads()); auto do_work = [num_batches, samples_per_batch, &ctx, &bcast, &means, &stddevs, &minvals, &maxvals, &gen, &output, kStdDevsInsideBoundsToUseRandnSampler]( int64_t start_output, int64_t limit_output) { random::PhiloxRandom gen_copy = gen; using Uniform = random::UniformDistribution<random::PhiloxRandom, T>; Uniform dist; using Normal = random::NormalDistribution<random::PhiloxRandom, T>; Normal normal_dist; gen_copy.Skip((start_output * 2 * kMaxIterations + Uniform::kResultElementCount - 1) / Uniform::kResultElementCount); Eigen::array<T, Uniform::kResultElementCount> z; Eigen::array<T, Uniform::kResultElementCount> g; const bool should_bcast = bcast.IsBroadcastingRequired(); const auto& means_batch_indices = bcast.batch_indices(0); const auto& stddevs_batch_indices = bcast.batch_indices(1); const auto& minvals_batch_indices = bcast.batch_indices(2); const auto& maxvals_batch_indices = bcast.batch_indices(3); auto output_flat = output.data(); for (int64_t output_idx = start_output; output_idx < limit_output; ) { int64_t batch_idx = output_idx / samples_per_batch; T* const output_batch_offset = output_flat + batch_idx; T mean, stddev, minval, maxval; if (should_bcast) { mean = means(means_batch_indices[batch_idx]); stddev = stddevs(stddevs_batch_indices[batch_idx]); minval = minvals(minvals_batch_indices[batch_idx]); maxval = maxvals(maxvals_batch_indices[batch_idx]); } else { mean = means(batch_idx); stddev = stddevs(batch_idx); minval = minvals(batch_idx); maxval = maxvals(batch_idx); } OP_REQUIRES(ctx, stddev > T(0) && minval < maxval && (Eigen::numext::isfinite(minval) || Eigen::numext::isfinite(maxval)), errors::InvalidArgument("Invalid parameters")); int num_iterations = 0; if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) { std::swap(minval, maxval); stddev = -stddev; } const T normMin = (minval - mean) / stddev; const T normMax = (maxval - mean) / stddev; const T sqrtFactor = 
Eigen::numext::sqrt((normMin * normMin) + T(4)); const T cutoff = T(2) * Eigen::numext::exp(T(0.5) + (normMin * (normMin - sqrtFactor)) / T(4)) / (normMin + sqrtFactor); const T diff = normMax - normMin; if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) && (normMax >= T(0.))) || ((normMax > kStdDevsInsideBoundsToUseRandnSampler) && (normMin <= T(0.)))) { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { const auto randn_sample = normal_dist(&gen_copy); const int size = randn_sample.size(); for (int i = 0; i < size; ++i) { if ((randn_sample[i] >= normMin) && (randn_sample[i] <= normMax)) { output_batch_offset[sample_idx * num_batches] = randn_sample[i] * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { ++num_iterations; if (num_iterations > kMaxIterations) { LOG(ERROR) << "TruncatedNormal randn rejection sampler " << "exceeded maximum iterations for " << "normMin=" << normMin << " normMax=" << normMax << " kMaxIterations=" << kMaxIterations; ctx->SetStatus(errors::Internal( "TruncatedNormal randn rejection sampler failed to accept" " a sample.")); return; } } } } } else if (diff < cutoff) { const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin; for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { const auto rand = dist(&gen_copy); const int size = rand.size(); for (int i = 0; i < size; i++) { z[i] = rand[i] * diff + normMin; g[i] = (plusFactor - z[i] * z[i]) / T(2.0); } const auto u = dist(&gen_copy); for (int i = 0; i < size; i++) { auto accept = u[i] <= Eigen::numext::exp(g[i]); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal uniform rejection sampler " << "exceeded max iterations. 
Sample may contain " << "outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal uniform rejection sampler failed to " " accept a sample.")); return; } output_batch_offset[sample_idx * num_batches] = z[i] * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { num_iterations++; } } } } else { const T alpha = (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) / T(2); for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { auto rand = dist(&gen_copy); const int size = rand.size(); int i = 0; while (i < size) { const T z = -Eigen::numext::log(rand[i]) / alpha + normMin; i++; const T x = normMin < alpha ? alpha - z : normMin - alpha; const T g = Eigen::numext::exp(-x * x / T(2.0)); const T u = rand[i]; i++; auto accept = (u <= g && z < normMax); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal exponential distribution " << "rejection sampler exceeds max iterations. 
" << "Sample may contain outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal exponential distribution rejection" " sampler failed to accept a sample.")); return; } output_batch_offset[sample_idx * num_batches] = z * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { num_iterations++; } } } } } }; const int64_t batchInitCost = (Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>()) * 2 + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() + Eigen::internal::functor_traits< Eigen::internal::scalar_sqrt_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() * 4 + Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::AddCost<T>(); const int64_t uniformSampleCost = random::PhiloxRandom::kElementCost + random::UniformDistribution<random::PhiloxRandom, T>::kElementCost; const int64_t uniformRejectionSamplingCost = uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() * 2 + Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost + Eigen::internal::functor_traits< Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>(); const int64_t batchCost = batchInitCost + uniformRejectionSamplingCost * 2; Shard(worker_threads.num_threads, worker_threads.workers, num_elements, batchCost, do_work); } }; } namespace { template <typename Device, typename T> class ParameterizedTruncatedNormalOp : public OpKernel { static constexpr int32_t kDesiredBatchSize = 100; public: explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, generator_.Init(context)); } void Compute(OpKernelContext* ctx) override { const Tensor& shape_tensor = ctx->input(0); const Tensor& means_tensor = ctx->input(1); const Tensor& stddevs_tensor = 
ctx->input(2); const Tensor& minvals_tensor = ctx->input(3); const Tensor& maxvals_tensor = ctx->input(4); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); OP_REQUIRES(ctx, shape_tensor.NumElements() > 0, errors::InvalidArgument("Shape tensor must not be empty, got ", shape_tensor.DebugString())); TensorShape tensor_shape; OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_tensor, &tensor_shape)); int32_t num_batches = tensor_shape.dim_size(0); int32_t samples_per_batch = 1; const int32_t num_dims = tensor_shape.dims(); for (int32_t i = 1; i < num_dims; i++) { samples_per_batch *= tensor_shape.dim_size(i); } const int32_t num_elements = num_batches * samples_per_batch; Tensor* samples_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, tensor_shape, &samples_tensor)); OP_REQUIRES(ctx, means_tensor.dims() <= 1, errors::InvalidArgument( "Input means should be a scalar or vector, got shape: ", means_tensor.shape().DebugString())); OP_REQUIRES(ctx, stddevs_tensor.dims() <= 1, errors::InvalidArgument( "Input stddevs should be a scalar or vector, got shape: ", stddevs_tensor.shape().DebugString())); OP_REQUIRES(ctx, minvals_tensor.dims() <= 1, errors::InvalidArgument( "Input minvals should be a scalar or vector, got shape: ", minvals_tensor.shape().DebugString())); OP_REQUIRES(ctx, maxvals_tensor.dims() <= 1, errors::InvalidArgument( "Input maxvals should be a scalar or vector, got shape: ", maxvals_tensor.shape().DebugString())); if ((means_tensor.dims() == 0 || means_tensor.dim_size(0) == 1) && (stddevs_tensor.dims() == 0 || stddevs_tensor.dim_size(0) == 1) && minvals_tensor.dims() == 0 && maxvals_tensor.dims() == 0) { int32_t size = num_batches * samples_per_batch; int32_t adjusted_samples = kDesiredBatchSize; int32_t adjusted_batches = Eigen::divup(size, adjusted_samples); num_batches = adjusted_batches; samples_per_batch = adjusted_samples; } else 
{ OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(means_tensor.shape()) || means_tensor.dim_size(0) == 1 || means_tensor.dim_size(0) == num_batches, errors::InvalidArgument( "Input means should have length 1 or shape[0], got shape: ", means_tensor.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(stddevs_tensor.shape()) || stddevs_tensor.dim_size(0) == 1 || stddevs_tensor.dim_size(0) == num_batches, errors::InvalidArgument( "Input stddevs should have length 1 or shape[0], got shape: ", stddevs_tensor.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(minvals_tensor.shape()) || minvals_tensor.dim_size(0) == 1 || minvals_tensor.dim_size(0) == num_batches, errors::InvalidArgument( "Input minvals should have length 1 or shape[0], got shape: ", minvals_tensor.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(maxvals_tensor.shape()) || maxvals_tensor.dim_size(0) == 1 || maxvals_tensor.dim_size(0) == num_batches, errors::InvalidArgument( "Input maxvals should have length 1 or shape[0], got shape: ", maxvals_tensor.shape().DebugString())); } auto truncFunctor = functor::TruncatedNormalFunctor<Device, T>(); random::PhiloxRandom rng = generator_.ReserveSamples128(num_batches * 2 * functor::kMaxIterations * (samples_per_batch + 3) / 4); truncFunctor(ctx, ctx->eigen_device<Device>(), num_batches, samples_per_batch, num_elements, means_tensor.flat<T>(), stddevs_tensor.flat<T>(), minvals_tensor.flat<T>(), maxvals_tensor.flat<T>(), rng, samples_tensor->flat<T>()); } private: GuardedPhiloxRandom generator_; ParameterizedTruncatedNormalOp(const ParameterizedTruncatedNormalOp&) = delete; void operator=(const ParameterizedTruncatedNormalOp&) = delete; }; template <typename Device, typename T> class StatelessParameterizedTruncatedNormal : public OpKernel { static const int32_t kDesiredBatchSize = 100; public: explicit StatelessParameterizedTruncatedNormal(OpKernelConstruction* context) : OpKernel(context) {} void 
Compute(OpKernelContext* ctx) override { const Tensor& shape_tensor = ctx->input(0); const Tensor& seed_tensor = ctx->input(1); const Tensor& means_tensor = ctx->input(2); const Tensor& stddevs_tensor = ctx->input(3); const Tensor& minvals_tensor = ctx->input(4); const Tensor& maxvals_tensor = ctx->input(5); OP_REQUIRES(ctx, seed_tensor.dims() == 1 && seed_tensor.dim_size(0) == 2, errors::InvalidArgument("seed must have shape [2], not ", seed_tensor.shape().DebugString())); tensorflow::BCastList<4> bcast( {means_tensor.shape().dim_sizes(), stddevs_tensor.shape().dim_sizes(), minvals_tensor.shape().dim_sizes(), maxvals_tensor.shape().dim_sizes()}, false, true); OP_REQUIRES(ctx, bcast.IsValid(), errors::InvalidArgument( "means, stddevs, minvals, maxvals must have compatible " "batch dimensions: ", means_tensor.shape().DebugString(), " vs. ", stddevs_tensor.shape().DebugString(), " vs. ", minvals_tensor.shape().DebugString(), " vs. ", maxvals_tensor.shape().DebugString())); TensorShape bcast_shape = BCast::ToShape(bcast.output_shape()); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); TensorShape output_shape; if (shape_tensor.dtype() == DataType::DT_INT32) { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(), &output_shape)); } else { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( shape_tensor.vec<int64_t>(), &output_shape)); } OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape), errors::InvalidArgument( "Shape passed in must end with broadcasted shape.")); int64_t samples_per_batch = 1; const int64_t num_sample_dims = (shape_tensor.dim_size(0) - bcast.output_shape().size()); for (int64_t i = 0; i < num_sample_dims; ++i) { samples_per_batch *= output_shape.dim_size(i); } int64_t num_batches = 1; for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) { num_batches *= 
output_shape.dim_size(i); } const int64_t num_elements = num_batches * samples_per_batch; Tensor* samples_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor)); auto truncFunctor = functor::TruncatedNormalFunctorV2<Device, T>(); random::PhiloxRandom::Key key; random::PhiloxRandom::ResultType counter; OP_REQUIRES_OK(ctx, GenerateKey(seed_tensor, &key, &counter)); auto philox = random::PhiloxRandom(counter, key); truncFunctor(ctx, ctx->eigen_device<Device>(), num_batches, samples_per_batch, num_elements, bcast, means_tensor.flat<T>(), stddevs_tensor.flat<T>(), minvals_tensor.flat<T>(), maxvals_tensor.flat<T>(), philox, samples_tensor->flat<T>()); } private: StatelessParameterizedTruncatedNormal( const StatelessParameterizedTruncatedNormal&) = delete; void operator=(const StatelessParameterizedTruncatedNormal&) = delete; }; } #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("ParameterizedTruncatedNormal") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("dtype"), \ ParameterizedTruncatedNormalOp<CPUDevice, TYPE>) \ REGISTER_KERNEL_BUILDER( \ Name("StatelessParameterizedTruncatedNormal") \ .HostMemory("shape") \ .HostMemory("seed") \ .HostMemory("means") \ .HostMemory("stddevs") \ .HostMemory("minvals") \ .HostMemory("maxvals") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("dtype"), \ StatelessParameterizedTruncatedNormal<CPUDevice, TYPE>) TF_CALL_half(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("ParameterizedTruncatedNormal") \ .Device(DEVICE_GPU) \ .HostMemory("shape") \ .TypeConstraint<TYPE>("dtype"), \ ParameterizedTruncatedNormalOp<GPUDevice, TYPE>) TF_CALL_half(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #endif }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* PTruncatedNormal(int num_batches, int samples_per_batch) { Graph* g = new Graph(OpRegistry::Global()); Tensor shape_t(DT_INT32, TensorShape({2})); shape_t.flat<int32>().setValues({num_batches, samples_per_batch}); Tensor means_t(DT_FLOAT, TensorShape({num_batches})); means_t.flat<float>().setConstant(0.0); Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches})); stdevs_t.flat<float>().setConstant(1.0); Tensor minvals_t(DT_FLOAT, TensorShape({num_batches})); minvals_t.flat<float>().setRandom(); Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches})); maxvals_t.flat<float>().setConstant(5.0); Node* ret; TF_CHECK_OK( NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal") .Input(test::graph::Constant(g, shape_t)) .Input(test::graph::Constant(g, means_t)) .Input(test::graph::Constant(g, stdevs_t)) .Input(test::graph::Constant(g, minvals_t)) .Input(test::graph::Constant(g, maxvals_t)) .Attr("dtype", DT_FLOAT) .Finalize(g, &ret)); return g; } static Graph* PTruncatedNormal2SD(int num_batches, int samples_per_batch) { Graph* g = new Graph(OpRegistry::Global()); Tensor shape_t(DT_INT32, TensorShape({2})); shape_t.flat<int32>().setValues({num_batches, samples_per_batch}); Tensor means_t(DT_FLOAT, TensorShape({num_batches})); means_t.flat<float>().setConstant(0.0); Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches})); stdevs_t.flat<float>().setConstant(1.0); Tensor minvals_t(DT_FLOAT, TensorShape({num_batches})); minvals_t.flat<float>().setConstant(-2.0); Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches})); maxvals_t.flat<float>().setConstant(2.0); Node* ret; TF_CHECK_OK( NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal") 
.Input(test::graph::Constant(g, shape_t)) .Input(test::graph::Constant(g, means_t)) .Input(test::graph::Constant(g, stdevs_t)) .Input(test::graph::Constant(g, minvals_t)) .Input(test::graph::Constant(g, maxvals_t)) .Attr("dtype", DT_FLOAT) .Finalize(g, &ret)); return g; } static Graph* PTruncatedNormalOneTail(int num_batches, int samples_per_batch) { Graph* g = new Graph(OpRegistry::Global()); Tensor shape_t(DT_INT32, TensorShape({2})); shape_t.flat<int32>().setValues({num_batches, samples_per_batch}); Tensor means_t(DT_FLOAT, TensorShape({num_batches})); means_t.flat<float>().setConstant(0.0); Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches})); stdevs_t.flat<float>().setConstant(1.0); Tensor minvals_t(DT_FLOAT, TensorShape({num_batches})); minvals_t.flat<float>().setConstant(2.0); Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches})); maxvals_t.flat<float>().setConstant(std::numeric_limits<float>::infinity()); Node* ret; TF_CHECK_OK( NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal") .Input(test::graph::Constant(g, shape_t)) .Input(test::graph::Constant(g, means_t)) .Input(test::graph::Constant(g, stdevs_t)) .Input(test::graph::Constant(g, minvals_t)) .Input(test::graph::Constant(g, maxvals_t)) .Attr("dtype", DT_FLOAT) .Finalize(g, &ret)); return g; } #define BM_PTruncatedNormalDev(DEVICE, B, S) \ static void BM_PTruncatedNormal_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, PTruncatedNormal(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_PTruncatedNormal_##DEVICE##_##B##_##S); #define BM_PTruncatedNormalDev_2SD(DEVICE, B, S) \ static void BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, PTruncatedNormal2SD(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ 
BENCHMARK(BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S); #define BM_PTruncatedNormalDev_OneTail(DEVICE, B, S) \ static void BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, PTruncatedNormalOneTail(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S); BM_PTruncatedNormalDev(cpu, 1000, 1000); BM_PTruncatedNormalDev_2SD(cpu, 10000, 100); BM_PTruncatedNormalDev_OneTail(cpu, 10000, 100); BM_PTruncatedNormalDev(gpu, 1000, 1000); BM_PTruncatedNormalDev_2SD(gpu, 10000, 100); BM_PTruncatedNormalDev_OneTail(gpu, 10000, 100); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/parameterized_truncated_normal_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9ebde96a-9c38-4b91-acb1-612813f99103
cpp
tensorflow/tensorflow
ragged_gather_op
tensorflow/core/kernels/ragged_gather_op.cc
tensorflow/core/kernels/ragged_gather_op_test.cc
#include <limits> #include <memory> #include <string> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/util/util.h" namespace tensorflow { namespace { template <typename VALUE_TYPE, typename SPLITS_TYPE> void WriteValueSlices( const Tensor& params_dense_values_in, const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices, SPLITS_TYPE value_size, Tensor* values_out) { const auto& params_dense_values = params_dense_values_in.flat_outer_dims<VALUE_TYPE, 2>(); auto values = values_out->flat_outer_dims<VALUE_TYPE, 2>(); int out_pos = 0; for (const auto& slice : value_slices) { for (int i = slice.first; i < slice.second; ++i) { for (int j = 0; j < value_size; ++j) { values(out_pos, j) = params_dense_values(i, j); } ++out_pos; } } } } template <typename INDEX_TYPE, typename SPLITS_TYPE> class RaggedGatherOpBase : public OpKernel { public: using OpKernel::OpKernel; void Compute(OpKernelContext* context) override { OpInputList params_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("params_nested_splits", &params_nested_splits_in)); OP_REQUIRES( context, params_nested_splits_in.size() > 0, errors::InvalidArgument("params_nested_splits must be non empty")); const Tensor& params_dense_values_in = context->input(params_nested_splits_in.size()); const Tensor& indices_in = context->input(params_nested_splits_in.size() + 1); OP_REQUIRES(context, params_nested_splits_in[0].dims() > 0, errors::InvalidArgument("Split tensors must not be scalars")); SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1; OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params)); OP_REQUIRES(context, params_dense_values_in.dims() > 0, errors::InvalidArgument("params.rank must be nonzero")); SPLITS_TYPE num_params_dense_values = params_dense_values_in.dim_size(0); 
std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>> value_slices; SPLITS_TYPE num_values = 0; std::vector<std::vector<SPLITS_TYPE>> out_splits; OP_REQUIRES_OK(context, MakeSplits(indices_in, params_nested_splits_in, num_params_dense_values, &out_splits, &value_slices, &num_values)); OP_REQUIRES_OK(context, WriteSplits(out_splits, context)); OP_REQUIRES_OK(context, WriteValues(params_dense_values_in, value_slices, out_splits.size(), num_values, context)); } private: using ConstFlatType = typename TTypes<SPLITS_TYPE>::ConstFlat; ::tensorflow::Status ValidateIndices(const Tensor& indices_in, SPLITS_TYPE num_params) { const auto& indices = indices_in.flat<INDEX_TYPE>(); for (SPLITS_TYPE i = 0; i < indices.size(); ++i) { SPLITS_TYPE index = indices(i); if (index < 0 || index >= num_params) { return errors::InvalidArgument( "indices", SliceDebugString(indices_in.shape(), i), " = ", index, " is not in [0, ", num_params, ")"); } } return absl::OkStatus(); } ::tensorflow::Status MakeSplits( const Tensor& indices_in, const OpInputList& params_nested_splits_in, SPLITS_TYPE num_params_dense_values, std::vector<std::vector<SPLITS_TYPE>>* out_splits, std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>* value_slices, SPLITS_TYPE* num_values) { *num_values = 0; value_slices->clear(); int num_splits = indices_in.dims() - 1 + params_nested_splits_in.size(); out_splits->assign(num_splits, {0}); const auto& indices = indices_in.flat<INDEX_TYPE>(); std::vector<ConstFlatType> params_nested_splits; params_nested_splits.reserve(params_nested_splits_in.size()); for (const auto& splits_in : params_nested_splits_in) { params_nested_splits.push_back(splits_in.flat<SPLITS_TYPE>()); } TF_RETURN_IF_ERROR( ValidateSplits(params_nested_splits, num_params_dense_values)); int nrows = 1; for (int dim = 0; dim < indices_in.dims() - 1; ++dim) { nrows *= indices_in.dim_size(dim); int row_length = indices_in.dim_size(dim + 1); for (int i = 1; i < nrows + 1; ++i) { out_splits->at(dim).push_back(i * row_length); 
} } for (int i = 0; i < indices.size(); ++i) { int start = indices(i); int limit = indices(i) + 1; for (int dim = 0; dim < params_nested_splits.size(); ++dim) { const auto& splits = params_nested_splits[dim]; int out_dim = dim + indices_in.dims() - 1; if (out_dim >= 0) { SPLITS_TYPE delta = out_splits->at(out_dim).back() - splits(start); for (int j = start; j < limit; ++j) { out_splits->at(out_dim).push_back(splits(j + 1) + delta); } } start = splits(start); limit = splits(limit); } if (limit != start) { value_slices->emplace_back(start, limit); *num_values += limit - start; } } return absl::OkStatus(); } ::tensorflow::Status ValidateSplits( const std::vector<ConstFlatType>& params_nested_splits, SPLITS_TYPE num_params_dense_values) { for (int dim = 0; dim < params_nested_splits.size(); ++dim) { const auto& splits = params_nested_splits[dim]; SPLITS_TYPE last_split = (dim == params_nested_splits.size() - 1) ? num_params_dense_values : params_nested_splits[dim + 1].size(); if (splits.size() == 0) { return errors::InvalidArgument("Ragged splits may not be empty"); } if (splits(0) < 0) { return errors::InvalidArgument("Ragged splits must be non-negative"); } if (splits(splits.size() - 1) > last_split) { return errors::InvalidArgument( "Ragged splits must not point past values"); } for (int i = 1; i < splits.size(); ++i) { if (splits(i - 1) > splits(i)) { return errors::InvalidArgument("Ragged splits must be sorted"); } } } return absl::OkStatus(); } ::tensorflow::Status WriteSplits( const std::vector<std::vector<SPLITS_TYPE>>& out_splits, OpKernelContext* context) { OpOutputList splits_out; TF_RETURN_IF_ERROR( context->output_list("output_nested_splits", &splits_out)); for (int i = 0; i < out_splits.size(); ++i) { Tensor* splits; SPLITS_TYPE num_splits = out_splits[i].size(); TF_RETURN_IF_ERROR( splits_out.allocate(i, TensorShape({num_splits}), &splits)); auto splits_flat = splits->flat<SPLITS_TYPE>(); std::copy_n(out_splits[i].data(), out_splits[i].size(), 
splits_flat.data()); } return absl::OkStatus(); } ::tensorflow::Status WriteValues( const Tensor& params_dense_values_in, const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices, int values_index, SPLITS_TYPE num_values, OpKernelContext* context) const { Tensor* values_out = nullptr; TensorShape values_shape = params_dense_values_in.shape(); values_shape.set_dim(0, num_values); TF_RETURN_IF_ERROR( context->allocate_output(values_index, values_shape, &values_out)); const SPLITS_TYPE num_elements = params_dense_values_in.NumElements(); const SPLITS_TYPE value_size = num_elements == 0 ? 0 : (num_elements / params_dense_values_in.dim_size(0)); CallWriteValueSlices(params_dense_values_in, value_slices, value_size, values_out); return absl::OkStatus(); } protected: virtual void CallWriteValueSlices( const Tensor& params_dense_values_in, const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices, SPLITS_TYPE value_size, Tensor* values_out) const = 0; }; template <typename INDEX_TYPE, typename VALUE_TYPE, typename SPLITS_TYPE> class RaggedGatherOp : public RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE> { public: using RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE>::RaggedGatherOpBase; private: void CallWriteValueSlices( const Tensor& params_dense_values_in, const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices, SPLITS_TYPE value_size, Tensor* values_out) const override { WriteValueSlices<VALUE_TYPE>(params_dense_values_in, value_slices, value_size, values_out); } }; #define REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(index_type, value_type, \ splits_type) \ REGISTER_KERNEL_BUILDER( \ Name("RaggedGather") \ .Device(DEVICE_CPU) \ .TypeConstraint<index_type>("Tindices") \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<splits_type>("Tsplits"), \ RaggedGatherOp<index_type, value_type, splits_type>); #define REGISTER_CPU_KERNEL(value_type) \ REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int32) \ 
REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64_t, value_type, int32) \ REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int64_t) \ REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64_t, value_type, int64_t) TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL); TF_CALL_tstring(REGISTER_CPU_KERNEL); TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL); TF_CALL_quint16(REGISTER_CPU_KERNEL); TF_CALL_qint16(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL #undef REGISTER_CPU_KERNEL_WITH_INDEX_TYPE }
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace {

// Tests for the RaggedGather CPU kernel and its shape function.
class RaggedGatherOpTest : public ::tensorflow::OpsTestBase {
 protected:
  // Builds and initializes a RaggedGather node and feeds its inputs:
  // first the nested splits vectors (one int64 input each), then the
  // flat values tensor, then the indices tensor.
  //
  //   indices_shape / indices:     the gather indices and their shape.
  //   params_nested_splits:        one splits vector per ragged dimension
  //                                of params (PARAMS_RAGGED_RANK = size()).
  //   params_dense_values_shape /
  //   params_dense_values:         the params' flat values.
  //
  // OUTPUT_RAGGED_RANK is derived as
  //   PARAMS_RAGGED_RANK + indices.rank - 1.
  template <typename VALUE_TYPE, typename INDEX_TYPE>
  void BuildRaggedGatherGraph(
      const TensorShape& indices_shape, const std::vector<INDEX_TYPE>& indices,
      const std::vector<std::vector<int64_t>>& params_nested_splits,
      const TensorShape& params_dense_values_shape,
      const gtl::ArraySlice<VALUE_TYPE> params_dense_values) {
    const auto& value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
    const auto& index_dtype = DataTypeToEnum<INDEX_TYPE>::v();
    int64_t PARAMS_RAGGED_RANK = params_nested_splits.size();
    int64_t num_splits = PARAMS_RAGGED_RANK + indices_shape.dims() - 1;
    TF_ASSERT_OK(
        NodeDefBuilder("tested_op", "RaggedGather")
            .Input(FakeInput(PARAMS_RAGGED_RANK))  // params_nested_splits
            .Input(FakeInput(value_dtype))         // params_dense_values
            .Input(FakeInput(index_dtype))         // indices
            .Attr("PARAMS_RAGGED_RANK", PARAMS_RAGGED_RANK)
            .Attr("OUTPUT_RAGGED_RANK", num_splits)
            .Attr("Tvalues", value_dtype)
            .Attr("Tindices", index_dtype)
            .Finalize(node_def()));
    TF_ASSERT_OK(InitOp());
    for (const auto& splits : params_nested_splits) {
      int64_t splits_size = splits.size();
      AddInputFromArray<int64_t>(TensorShape({splits_size}), splits);
    }
    AddInputFromArray<VALUE_TYPE>(params_dense_values_shape,
                                  params_dense_values);
    AddInputFromArray<INDEX_TYPE>(indices_shape, indices);
  }
};

// Basic 1-D indices / ragged-rank-1 params: gathering rows {2,1,0,3}
// (lengths 4,0,3,2 per splits {0,3,3,7,9}) yields splits {0,4,4,7,9}
// and the corresponding row values in gather order.
TEST_F(RaggedGatherOpTest, RaggedGather) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({4}),     // indices.shape
      {2, 1, 0, 3},         // indices
      {{0, 3, 3, 7, 9}},    // params_nested_splits
      TensorShape({9}),     // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  TF_ASSERT_OK(RunOpKernel());
  test::ExpectTensorEqual<int64_t>(*GetOutput(0),
                                   test::AsTensor<int64_t>({0, 4, 4, 7, 9}));
  test::ExpectTensorNear<float>(
      *GetOutput(1),
      test::AsTensor<float>({.4, .5, .6, .7, .1, .2, .3, .8, .9}), 0.1);
}

// Ragged-rank-2 params: both nested splits levels are gathered, producing
// two output splits tensors plus the values.
TEST_F(RaggedGatherOpTest, RaggedGather_3DParams) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({5}),                          // indices.shape
      {2, 1, 0, 2, 3},                           // indices
      {{0, 1, 3, 3, 5, 6}, {0, 0, 2, 3, 5, 8, 9}},  // params_nested_splits
      TensorShape({9}),                          // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}       // params_dense_values
  );
  TF_ASSERT_OK(RunOpKernel());
  test::ExpectTensorEqual<int64_t>(*GetOutput(0),
                                   test::AsTensor<int64_t>({0, 0, 2, 3, 3, 5}));
  test::ExpectTensorEqual<int64_t>(*GetOutput(1),
                                   test::AsTensor<int64_t>({0, 2, 3, 3, 5, 8}));
  test::ExpectTensorNear<float>(
      *GetOutput(2),
      test::AsTensor<float>({.1, .2, .3, .4, .5, .6, .7, .8}), 0.1);
}

// Params whose flat values are multi-dimensional ([4, 2]): the inner dense
// dimension is carried through to the output values tensor unchanged.
TEST_F(RaggedGatherOpTest, RaggedGather_4DParams) {
  BuildRaggedGatherGraph<int32, int32>(
      TensorShape({4}),                // indices.shape
      {2, 1, 0, 2},                    // indices
      {{0, 1, 3, 3}, {0, 0, 3, 4}},    // params_nested_splits
      TensorShape({4, 2}),             // params_dense_values.shape
      {1, 2, 3, 4, 5, 6, 7, 8}         // params_dense_values
  );
  TF_ASSERT_OK(RunOpKernel());
  test::ExpectTensorEqual<int64_t>(*GetOutput(0),
                                   test::AsTensor<int64_t>({0, 0, 2, 3, 3}));
  test::ExpectTensorEqual<int64_t>(*GetOutput(1),
                                   test::AsTensor<int64_t>({0, 3, 4, 4}));
  test::ExpectTensorEqual<int32>(
      *GetOutput(2),
      test::AsTensor<int32>({1, 2, 3, 4, 5, 6, 7, 8}, TensorShape({4, 2})));
}

// 2-D indices add one extra output splits level (OUTPUT_RAGGED_RANK =
// PARAMS_RAGGED_RANK + indices.rank - 1 = 2) describing the index matrix's
// row structure.
TEST_F(RaggedGatherOpTest, RaggedGather_2DIndices) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({2, 2}),   // indices.shape
      {2, 1, 0, 3},          // indices
      {{0, 3, 3, 7, 9}},     // params_nested_splits
      TensorShape({9}),      // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  TF_ASSERT_OK(RunOpKernel());
  test::ExpectTensorEqual<int64_t>(*GetOutput(0),
                                   test::AsTensor<int64_t>({0, 2, 4}));
  test::ExpectTensorEqual<int64_t>(*GetOutput(1),
                                   test::AsTensor<int64_t>({0, 4, 4, 7, 9}));
  test::ExpectTensorNear<float>(
      *GetOutput(2),
      test::AsTensor<float>({.4, .5, .6, .7, .1, .2, .3, .8, .9}), 0.1);
}

// Scalar indices (rank 0) drop a ragged level entirely: the single gathered
// row's values come back with no splits output.
TEST_F(RaggedGatherOpTest, RaggedGather_ScalarIndices) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({}),       // indices.shape
      {2},                   // indices
      {{0, 3, 3, 7, 9}},     // params_nested_splits
      TensorShape({9}),      // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  TF_ASSERT_OK(RunOpKernel());
  test::ExpectTensorNear<float>(*GetOutput(0),
                                test::AsTensor<float>({.4, .5, .6, .7}), 0.1);
}

// --- Error cases: each invalid input must produce the exact error below. ---

// Index 10 is outside [0, nrows) where nrows = splits.size() - 1 = 4.
TEST_F(RaggedGatherOpTest, RaggedGather_OutOfBounds) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({2}),      // indices.shape
      {2, 10},               // indices
      {{0, 3, 3, 7, 9}},     // params_nested_splits
      TensorShape({9}),      // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  EXPECT_EQ("indices[1] = 10 is not in [0, 4)", RunOpKernel().message());
}

// Splits must be monotonically non-decreasing; {..., 5, 2, ...} is not.
TEST_F(RaggedGatherOpTest, InvalidSplitsNotSorted) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({2}),      // indices.shape
      {0, 2},                // indices
      {{0, 3, 5, 2, 9}},     // params_nested_splits
      TensorShape({9}),      // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  EXPECT_EQ("Ragged splits must be sorted", RunOpKernel().message());
}

// Splits may not contain negative offsets.
TEST_F(RaggedGatherOpTest, InvalidSplitsNegative) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({2}),      // indices.shape
      {0, 2},                // indices
      {{-1, 3, 2, 7, 9}},    // params_nested_splits
      TensorShape({9}),      // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  EXPECT_EQ("Ragged splits must be non-negative", RunOpKernel().message());
}

// A splits vector must contain at least one element (the leading 0).
TEST_F(RaggedGatherOpTest, InvalidSplitsEmpty) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({0}),      // indices.shape
      {},                    // indices
      {{}},                  // params_nested_splits
      TensorShape({0}),      // params_dense_values.shape
      {}                     // params_dense_values
  );
  EXPECT_EQ("Ragged splits may not be empty", RunOpKernel().message());
}

// Split offsets may not exceed the number of values (9 here).
TEST_F(RaggedGatherOpTest, InvalidSplitsTooBig) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({2}),          // indices.shape
      {0, 2},                    // indices
      {{0, 20, 40, 80, 100}},    // params_nested_splits
      TensorShape({9}),          // params_dense_values.shape
      {.1, .2, .3, .4, .5, .6, .7, .8, .9}  // params_dense_values
  );
  EXPECT_EQ("Ragged splits must not point past values",
            RunOpKernel().message());
}

// Scalar (rank-0) flat values are rejected: there is no outer dimension
// to slice.
TEST_F(RaggedGatherOpTest, BadValuesShape) {
  BuildRaggedGatherGraph<float, int32>(
      TensorShape({0}),      // indices.shape
      {},                    // indices
      {{0}},                 // params_nested_splits
      TensorShape({}),       // params_dense_values.shape
      {.1}                   // params_dense_values
  );
  EXPECT_EQ("params.rank must be nonzero", RunOpKernel().message());
}

// Shape-function tests for several (PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK)
// attribute combinations.  Input shape strings are
// "<splits...>;<values>;<indices>"; outputs are
// "<splits...>;<values>".
TEST_F(RaggedGatherOpTest, ShapeFn) {
  ShapeInferenceTestOp op("RaggedGather");

  // 1 ragged dim in, 1 ragged dim out (vector indices).
  (*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
  (*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(1);
  INFER_OK(op, "?;?;?", "[?];?");
  INFER_OK(op, "[?];[?];[?]", "[?];[?]");
  INFER_OK(op, "[?];[?,?,?];[?]", "[?];[?,d1_1,d1_2]");
  INFER_OK(op, "[5];[10];[15]", "[?];[?]");
  INFER_OK(op, "[5];[10,2];[15]", "[?];[?,d1_1]");
  INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[5];[];[]");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,2];[];[5]");

  // 2 ragged dims in, 2 ragged dims out (vector indices).
  (*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(2);
  (*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(2);
  INFER_OK(op, "?;?;?;?", "[?];[?];?");
  INFER_OK(op, "[?];[?];[?];[?]", "[?];[?];[?]");
  INFER_OK(op, "[?];[?];[?,?,?];[?]", "[?];[?];[?,d2_1,d2_2]");
  INFER_OK(op, "[5];[10];[15];[20]", "[?];[?];[?]");

  // 1 ragged dim in, 2 ragged dims out (matrix indices).
  (*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
  (*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(2);
  INFER_OK(op, "?;?;?", "[?];[?];?");
  INFER_OK(op, "[?];[?];[?,?]", "[?];[?];[?]");
  INFER_OK(op, "[?];[?,?,?];[?,?]", "[?];[?];[?,d1_1,d1_2]");
  INFER_OK(op, "[15];[20];[5,10]", "[?];[?];[?]");
  INFER_OK(op, "[15];[20,2];[5,10]", "[?];[?];[?,d1_1]");

  // 1 ragged dim in, 0 ragged dims out (scalar indices): values only.
  (*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
  (*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(0);
  INFER_OK(op, "[?];[?];[]", "[?]");
}

}  // namespace
}  // namespace tensorflow
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_gather_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_gather_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea